Example #1
    def test_square_matrices_1(self):
        op4 = OP4()
        # matrices = op4.read_op4(os.path.join(op4Path, fname))
        form1 = 1
        form2 = 2
        form3 = 2
        from numpy import matrix, ones, reshape, arange

        A1 = matrix(ones((3, 3), dtype="float64"))
        A2 = reshape(arange(9, dtype="float64"), (3, 3))
        A3 = matrix(ones((1, 1), dtype="float32"))
        matrices = {"A1": (form1, A1), "A2": (form2, A2), "A3": (form3, A3)}

        for (is_binary, fname) in [(False, "small_ascii.op4"), (True, "small_binary.op4")]:
            op4_filename = os.path.join(op4Path, fname)
            op4.write_op4(op4_filename, matrices, name_order=None, precision="default", is_binary=is_binary)
            matrices2 = op4.read_op4(op4_filename, precision="default")

            (form1b, A1b) = matrices2["A1"]
            (form2b, A2b) = matrices2["A2"]
            (form3b, A3b) = matrices2["A3"]
            self.assertEqual(form1, form1b)
            self.assertEqual(form2, form2b)
            self.assertEqual(form3, form3b)

            self.assertTrue(array_equal(A1, A1b))
            self.assertTrue(array_equal(A2, A2b))
            self.assertTrue(array_equal(A3, A3b))
            del A1b, A2b, A3b
            del form1b, form2b, form3b
Example #2
File: P3.py Project: MilosAtz/NE155
def flux_fdm(h,a,D,sig_a,S):
	# First, we determine the number of cells and points.
	n_cell = int(2*a/h)  # the domain is [-a, a]
	n_points = n_cell+1
#####################################################################################################
# We want to set up the system Ax=b, where A is a tridiagonal matrix and x contains the flux
# at each point. Because S is constant, the vector b will be constant as well.
	b=np.zeros(n_cell-1)
	for i in range(0, n_cell-1):
		b[i]=S*h**2/D
	b=np.transpose(np.matrix(b))
#####################################################################################################
# A holds the coefficients for the flux; A is a tridiagonal matrix. The lists A_a, A_b, and
# A_c hold the sub-, main-, and super-diagonal coefficients, respectively.
	A_a=[-1]*int(n_cell-2)
	A_b=[2+(sig_a*h**2/D)]*int(n_cell-1)
	A_c=[-1]*int(n_cell-2)
	A=np.matrix(np.diag(A_a, -1) + np.diag(A_b, 0) + np.diag(A_c, 1))
#####################################################################################################
# Utilize the Thomas method to solve the system of equations
	phi = [0]*n_points
	for i in range(1,n_cell-1):
		A[i,i] = A[i,i]-(A[i,i-1]/A[i-1,i-1])*A[i-1,i]
		b[i]=b[i]-(A[i,i-1]/A[i-1,i-1])*b[i-1]
	phi[n_cell-1]=b[n_cell-2]/A[n_cell-2,n_cell-2]
	for i in range(n_cell-3, -1, -1):
		phi[i+1]=(b[i]-A[i,i+1]*phi[i+2])/A[i,i]
	return(phi)
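A minimal usage sketch for flux_fdm above, assuming it runs in P3.py's module scope with numpy imported as np (the snippet omits the import); the parameter values here are made up purely for illustration:

import numpy as np

# hypothetical inputs: mesh size h, slab half-width a, diffusion coefficient D,
# absorption cross-section sig_a, uniform source S
h, a, D, sig_a, S = 0.5, 4.0, 1.0, 0.2, 8.0
phi = flux_fdm(h, a, D, sig_a, S)            # flux at each mesh point
print(len(phi), float(phi[len(phi) // 2]))   # point count and mid-slab flux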
Example #3
  def compute_distances_no_loops(self, X):
    """
    Compute the distance between each test point in X and each training point
    in self.X_train using no explicit loops.

    Input / Output: Same as compute_distances_two_loops
    """
    num_test = X.shape[0]
    num_train = self.X_train.shape[0]
    dists = np.zeros((num_test, num_train)) 
    #########################################################################
    # TODO:                                                                 #
    # Compute the l2 distance between all test points and all training      #
    # points without using any explicit loops, and store the result in      #
    # dists.                                                                #
    # HINT: Try to formulate the l2 distance using matrix multiplication    #
    #       and two broadcast sums.                                         #
    #########################################################################
    num_feature = X.shape[1]
    A = np.matrix(X)
    B = np.matrix(self.X_train)
    ImT = np.ones((num_feature, num_train))
    Itm = np.ones((num_test, num_feature))
    # multiplying by the ones matrices implements the two broadcast sums
    sums_AB = np.power(A, 2) * ImT + Itm * np.power(B.T, 2)
    prod_AB = A * B.T
    dists = np.power(sums_AB - 2 * prod_AB, 0.5)
    #########################################################################
    #                         END OF YOUR CODE                              #
    #########################################################################
    return dists
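The no-loop version relies on the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b. A small self-contained check of that identity against a brute-force double loop (random illustrative data only):

import numpy as np

X_test = np.random.rand(4, 3)
X_train = np.random.rand(5, 3)
sq = (X_test ** 2).sum(1)[:, None] + (X_train ** 2).sum(1)[None, :]
sq = np.maximum(sq - 2 * X_test @ X_train.T, 0)   # clamp tiny negatives from round-off
dists = np.sqrt(sq)
brute = np.array([[np.linalg.norm(t - r) for r in X_train] for t in X_test])
assert np.allclose(dists, brute)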
Example #4
def test_reparametrization():
    """ Here we define the tests for reparametrizer of our ArcLengthParametrizer, we try it with a half a 
    circle and a fan. 
    We test it both in 2d and 3d."""
    R = 1
    P = 1
    toll = 1.e-6

    n = 10
    ii = [0.5,0.8,4,6,7,8,9,9.6,10,10.1,11]
    control_points_3d = np.asmatrix(np.zeros([n+1,3]))#[np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
    control_points_3d[:,0] = np.transpose(np.matrix([R*np.cos(5*i * np.pi / (n + 1))for i in ii]))
    control_points_3d[:,1] = np.transpose(np.matrix([R*np.sin(5*i * np.pi / (n + 1))for i in ii]))
    control_points_3d[:,2] = np.transpose(np.matrix([P*i for i in range(n+1)]))
    #control_points_3d[3,:] += 32
    #print control_points_3d[0]
    vsl = AffineVectorSpace(UniformLagrangeVectorSpace(n+1),0,1)
    arky = ArcLengthParametrizer(vsl, control_points_3d)
    new_control_points_3d = arky.reparametrize()

    new_arky = ArcLengthParametrizer(vsl, new_control_points_3d)
    new_new_control_points_3d = new_arky.reparametrize()
    tt = np.linspace(0, 1, 128)

    new_new_vals = vsl.element(new_new_control_points_3d)(tt)
    #print vals
    new_vals = vsl.element(new_control_points_3d)(tt)
    #print vals.shape, new_vals.shape
    assert np.amax(np.abs(new_new_vals-new_vals)) < toll
Example #5
def test_ohess():
    """Simple test of ohess matrix."""
    n = 10
    a = rogues.ohess(n)
    # Test to see if a is orthogonal...
    b = np.matrix(a) * np.matrix(a.T)
    assert(np.allclose(b, np.eye(n)))
Example #6
    def __init__(self, x_m, y_m, heading_d=None):
        if heading_d is None:
            heading_d = 0.0
        self._estimates = numpy.matrix(
            # x m, y m, heading d, speed m/s
            [x_m, y_m, heading_d, 0.0]
        ).transpose()  # x

        # This will be populated as the filter runs
        # TODO: Ideally, this should be initialized to those values, for right
        # now, identity matrix is fine
        self._covariance_matrix = numpy.matrix([  # P
            [1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])
        # TODO: Tune this parameter for maximum performance
        self._process_noise = numpy.matrix([  # Q
            [1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])

        self._last_observation_s = time.time()
        self._estimated_turn_rate_d_s = 0.0
Example #7
  def __init__(self, left_low=True, right_low=True, name="VelocityDrivetrainModel"):
    super(VelocityDrivetrainModel, self).__init__(name)
    self._drivetrain = drivetrain.Drivetrain(left_low=left_low,
                                             right_low=right_low)
    self.dt = 0.005
    self.A_continuous = numpy.matrix(
        [[self._drivetrain.A_continuous[1, 1], self._drivetrain.A_continuous[1, 3]],
         [self._drivetrain.A_continuous[3, 1], self._drivetrain.A_continuous[3, 3]]])

    self.B_continuous = numpy.matrix(
        [[self._drivetrain.B_continuous[1, 0], self._drivetrain.B_continuous[1, 1]],
         [self._drivetrain.B_continuous[3, 0], self._drivetrain.B_continuous[3, 1]]])
    self.C = numpy.matrix(numpy.eye(2))
    self.D = numpy.matrix(numpy.zeros((2, 2)))

    self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,
                                               self.B_continuous, self.dt)

    # FF * X = U (steady state)
    self.FF = self.B.I * (numpy.eye(2) - self.A)

    self.PlaceControllerPoles([0.67, 0.67])
    self.PlaceObserverPoles([0.02, 0.02])

    self.G_high = self._drivetrain.G_high
    self.G_low = self._drivetrain.G_low
    self.resistance = self._drivetrain.resistance
    self.r = self._drivetrain.r
    self.Kv = self._drivetrain.Kv
    self.Kt = self._drivetrain.Kt

    self.U_max = self._drivetrain.U_max
    self.U_min = self._drivetrain.U_min
Example #8
def test_arclength_half_circle():
    """ Here we define the tests for the lenght computer of our ArcLengthParametrizer, we try it with a half a 
    circle and a fan. 
    We test it both in 2d and 3d."""


    # Number of interpolation points minus one
    n = 5
    toll = 1.e-6
    points = np.linspace(0, 1, (n+1) ) 
    R = 1
    P = 1
    control_points_2d = np.asmatrix(np.zeros([n+1,2]))#[np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
    control_points_2d[:,0] = np.transpose(np.matrix([R*np.cos(1 * i * np.pi / (n + 1))for i in range(n+1)]))
    control_points_2d[:,1] = np.transpose(np.matrix([R*np.sin(1 * i * np.pi / (n + 1))for i in range(n+1)]))

    control_points_3d = np.asmatrix(np.zeros([n+1,3]))#[np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
    control_points_3d[:,0] = np.transpose(np.matrix([R*np.cos(1 * i * np.pi / (n + 1))for i in range(n+1)]))
    control_points_3d[:,1] = np.transpose(np.matrix([R*np.sin(1 * i * np.pi / (n + 1))for i in range(n+1)]))
    control_points_3d[:,2] = np.transpose(np.matrix([P*i for i in range(n+1)]))

    vsl = AffineVectorSpace(UniformLagrangeVectorSpace(n+1),0,1)
    dummy_arky_2d = ArcLengthParametrizer(vsl, control_points_2d)
    dummy_arky_3d = ArcLengthParametrizer(vsl, control_points_3d)
    length2d = dummy_arky_2d.compute_arclength()[-1,1]
    length3d = dummy_arky_3d.compute_arclength()[-1,1]
#    print (length2d)
#    print (n * np.sqrt(2))
    l2 = np.pi * R
    l3 = 2 * np.pi * np.sqrt(R * R + (P / (2 * np.pi)) * (P / (2 * np.pi)))
    print (length2d, l2)
    print (length3d, l3)
    assert (length2d - l2) < toll
    assert (length3d - l3) < toll
Example #9
def dX_dt_template(C):
    '''Create a function that scipy.integrate.odeint can use for LV models.
    The interaction matrix specifies the coefficients for each term in the 
    derivative of the given species population. All equations take the form of:
    dxi/dt = xi * (alpha_i + ci1*x1 + ci2*x2 + ci3*x3 + ... + cii*xi + ... + cin*xn)
    A coefficient of 0 implies no direct interaction between species k and species i.
    Inputs:
     interaction_matrix - n x (n+1) array.
    Outputs: 
     A function which calculates the dxi/dt's for all species in the simulation.
     Function is in proper format to be utilized by scipy's ode integrate. 
    '''
    # C = coefficient of interaction matrix, size nX(n+1)
    # C = [alpha_1, c11, c12, ..., c1n]
    #     [   .   ,  . ,  . , ..., c2n]
    #     [   .   ,  . ,  . , ...,  . ]
    #     [   .   ,  . ,  . , ...,  . ]
    #     [alpha_n, cn1, cn2, ..., cnn]
    # X = column vector of the value of each of the n species
    # X = [x1, x2, ..., xn]
    # Y = [1, x1, x2, ..., xn] 
    # This function returns:
    # X <*> (C*Y)  
    # where * is matrix multiplication and <*> is elementwise multiplication
    return lambda X, t=0: X*array(matrix(C)*matrix(hstack((array([1]),X))).T).reshape(len(X))
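A usage sketch for dX_dt_template, assuming the numpy names its lambda relies on (array, matrix, hstack) are imported at module level, as in the source; the interaction matrix below encodes the classic two-species Lotka-Volterra system dx/dt = x(1 - 0.5y), dy/dt = y(-1 + 0.5x):

from numpy import array, matrix, hstack   # names the returned lambda uses
from scipy.integrate import odeint
import numpy as np

C = np.array([[ 1.0, 0.0, -0.5],    # row i: [alpha_i, ci1, ci2]
              [-1.0, 0.5,  0.0]])
dX_dt = dX_dt_template(C)
t = np.linspace(0, 15, 300)
trajectory = odeint(dX_dt, np.array([10.0, 5.0]), t)  # populations over time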
Example #10
def cline(qx,qy,h):
    """Finds the center-line flow in the channel, i.e. the path of the maximum flow rate in a     channel. It uses quadratic interpolation to find the exact location where the flow rate is max    imum between two pixels.

    Usage: cline(qx_data, qy_data, h_data)
    """
    tx,ty = ida.tipcoord(h)
    print(tx, ty)
    nx, ny = qx.shape[0], qx.shape[1]
    Q = np.sqrt(np.matrix(qx**2.0 + qy**2.0))
    Qmax = np.zeros(tx)
    ymax = np.zeros(tx, dtype=int)   # integer pixel index of the row maximum
    ymax2 = np.zeros(tx)
    for x in range(tx):
        Qmax[x] = Q[x,:].max()
        for y in range(ny):
            if Q[x,y] == Qmax[x]:
                ymax[x] = y
        A = np.matrix([[(ymax[x]-1)**2,ymax[x]-1,1],[(ymax[x])**2,ymax[x],1],[(ymax[x]+1)**2,ymax[x]+1,1]])
        B = np.matrix([[(Q[x,(ymax[x]-1)])],[(Q[x,(ymax[x])])],[(Q[x,(ymax[x]+1)])]])
        X = np.linalg.solve(A,B)
        ymax2[x] = (-X[1]/(2*X[0]))
    plt.plot(ymax2,Qmax)
    #plt.axis([0,h.shape[0],ymax2[0]-5,ymax2[0]+5])
    plt.show()
    return ymax2
Example #11
def svdUpdate(U, S, V, a, b):
    """
    Update SVD of an (m x n) matrix `X = U * S * V^T` so that
    `[X + a * b^T] = U' * S' * V'^T`
    and return `U'`, `S'`, `V'`.
    
    `a` and `b` are (m, 1) and (n, 1) rank-1 matrices, so that svdUpdate can simulate 
    incremental addition of one new document and/or term to an already existing 
    decomposition.
    """
    rank = U.shape[1]
    m = U.T * a
    p = a - U * m
    Ra = numpy.sqrt(p.T * p)
    assert float(Ra) > 1e-10
    P = (1.0 / float(Ra)) * p
    n = V.T * b
    q = b - V * n
    Rb = numpy.sqrt(q.T * q)
    assert float(Rb) > 1e-10
    Q = (1.0 / float(Rb)) * q

    K = numpy.matrix(numpy.diag(list(numpy.diag(S)) + [0.0])) + numpy.bmat("m ; Ra") * numpy.bmat(" n; Rb").T
    u, s, vt = numpy.linalg.svd(K, full_matrices=False)
    tUp = numpy.matrix(u[:, :rank])
    tVp = numpy.matrix(vt.T[:, :rank])
    tSp = numpy.matrix(numpy.diag(s[:rank]))
    Up = numpy.bmat("U P") * tUp
    Vp = numpy.bmat("V Q") * tVp
    Sp = tSp
    return Up, Sp, Vp
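A quick sanity check of svdUpdate on random rank-deficient data (illustrative only; assumes numpy is imported as numpy, as the function expects): because [U P] and [V Q] have orthonormal columns, the returned singular values should match the leading singular values of a full SVD of X + a*b^T.

import numpy

m, n, r = 8, 6, 4
X = numpy.matrix(numpy.random.rand(m, r)) * numpy.matrix(numpy.random.rand(r, n))  # rank-r
u, s, vt = numpy.linalg.svd(X, full_matrices=False)
U = numpy.matrix(u[:, :r])
S = numpy.matrix(numpy.diag(s[:r]))
V = numpy.matrix(vt.T[:, :r])
a = numpy.matrix(numpy.random.rand(m, 1))
b = numpy.matrix(numpy.random.rand(n, 1))
Up, Sp, Vp = svdUpdate(U, S, V, a, b)
s_new = numpy.linalg.svd(X + a * b.T, compute_uv=False)
assert numpy.allclose(numpy.diag(Sp), s_new[:r])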
Example #12
    def __init__(self, mol, mints):
        """
        Initialize the rhf
        :param mol: a psi4 molecule object
        :param mints: a molecular integrals object (from MintsHelper)
        """
        self.mol = mol
        self.mints = mints

        self.V_nuc = mol.nuclear_repulsion_energy()
        self.T = np.matrix(mints.ao_kinetic())
        self.S = np.matrix(mints.ao_overlap())
        self.V = np.matrix(mints.ao_potential())

        self.g = np.array(mints.ao_eri())

        # Determine the number of electrons and the number of doubly occupied orbitals
        self.nelec = -mol.molecular_charge()
        for A in range(mol.natom()):
            self.nelec += int(mol.Z(A))
        if mol.multiplicity() != 1 or self.nelec % 2:
            raise Exception("This code only allows closed-shell molecules")
        self.ndocc = self.nelec // 2

        self.maxiter = psi4.get_global_option('MAXITER')
        self.e_convergence = psi4.get_global_option('E_CONVERGENCE')

        self.nbf = mints.basisset().nbf()
Example #13
def broyden1_modified(F, xin, iter=10, alpha=0.1, verbose = False):
    """Broyden's first method, modified by O. Certik.

    Updates inverse Jacobian using some matrix identities at every iteration;
    it's faster than newton_slow, but still not optimal.

    The best norm |F(x)|=0.005 achieved in ~45 iterations.
    """
    def inv(A,u,v):

        #interesting is that this 
        #return (A.I+u*v.T).I
        #is more stable than
        #return A-A*u*v.T*A/float(1+v.T*A*u)
        Au=A*u
        return A-Au*(v.T*A)/float(1+v.T*Au)
    xm=numpy.matrix(xin).T
    Fxm=myF(F,xm)
    Jm=alpha*numpy.matrix(numpy.identity(len(xin)))
    for n in range(iter):
        deltaxm=Jm*Fxm
        xm=xm+deltaxm
        Fxm1=myF(F,xm)
        deltaFxm=Fxm1-Fxm
        Fxm=Fxm1
#        print "-------------",norm(deltaFxm),norm(deltaxm)
        deltaFxm/=norm(deltaxm)
        deltaxm/=norm(deltaxm)
        Jm=inv(Jm+deltaxm*deltaxm.T*Jm,-deltaFxm,deltaxm)
        
        if verbose:
            print "%d:  |F(x)|=%.3f"%(n, norm(Fxm))
    return xm
Example #14
def manova1_single_node(Y, GROUP):
	### assemble counts:
	u           = np.unique(GROUP)
	nGroups     = u.size
	nResponses  = Y.shape[0]
	nComponents = Y.shape[1]
	### create design matrix:
	X           = np.zeros((nResponses, nGroups))
	ind0        = 0
	for i,uu in enumerate(u):
		n       = (GROUP==uu).sum()
		X[ind0:ind0+n, i] = 1
		ind0   += n
	### SS for original design:
	Y,X   = np.matrix(Y), np.matrix(X)
	b     = np.linalg.pinv(X)*Y
	R     = Y - X*b
	R     = R.T*R
	### SS for reduced design:
	X0    = np.matrix(  np.ones(Y.shape[0])  ).T
	b0    = np.linalg.pinv(X0)*Y
	R0    = Y - X0*b0
	R0    = R0.T*R0
	### Wilks' lambda:
	lam   = np.linalg.det(R) / (np.linalg.det(R0) + eps)
	### test statistic:
	N,p,k = float(nResponses), float(nComponents), float(nGroups)
	x2    = -((N-1) - 0.5*(p+k)) * log(lam)
	df    = p*(k-1)
	# return lam, x2, df
	return x2
Example #15
 def calcPCA(self, data):
     data -= np.mean(data, axis=0)
     # data = data / np.std(data, axis=0)
     c = np.cov(data, rowvar=0)
     values, vectors = la.eig(c)
     featureVector = vectors[:, [values.tolist().index(x) for x in np.sort(values)[::-1]]]
     return (np.matrix(featureVector) * np.matrix(data.T)).T
Example #16
def get_derivatives(sample_df,delta_t):
	bid_price_names=[]
	bid_size_names=[]
	ask_price_names=[]
	ask_size_names=[]
	ask_price_derivative_names=[]
	ask_size_derivative_names=[]
	bid_price_derivative_names=[]
	bid_size_derivative_names=[]
	for i in range(1,11):
		bid_price_names.append("bid_price"+str(i))
		bid_size_names.append('bid_size'+str(i))
		ask_price_names.append('ask_price'+str(i))
		ask_size_names.append("ask_size"+str(i))
		ask_price_derivative_names.append('ask_price_derivative'+str(i))
		ask_size_derivative_names.append('ask_size_derivative'+str(i))
		bid_price_derivative_names.append('bid_price_derivative'+str(i))
		bid_size_derivative_names.append('bid_size_derivative'+str(i))
	original_df=sample_df[ask_price_names+ask_size_names+bid_price_names+bid_size_names][:(sample_df.shape[0]-delta_t)]
	shift_df=sample_df[ask_price_names+ask_size_names+bid_price_names+bid_size_names][delta_t:]
	derivative_df=pd.DataFrame((np.matrix(shift_df)-np.matrix(original_df))/delta_t)
#	derivative_df=pd.concat([pd.DataFrame(np.array(np.nan).repeat(delta_t*derivative_df.shape[1]).reshape((delta_t, derivative_df.shape[1]))), derivative_df])
#	time_index_sub=sample_df[['Index','Time']][delta_t:]
	derivative_df.index=[i for i in range(delta_t,len(sample_df))]
	time_index_sub=sample_df[['Index','Time']][delta_t:]
	derivative_df.index = time_index_sub.index
	derivative_df=pd.concat([time_index_sub,derivative_df],axis=1)
	derivative_df.columns=['Index','Time']+ask_price_derivative_names+ask_size_derivative_names+bid_price_derivative_names+bid_size_derivative_names
	return(derivative_df)
Example #17
	def derivadaCusto(self, x, y):
		# Compute the derivative with respect to W1 and W2
		self.yEstimado = self.propaga(x)
		matrix_x = np.matrix(list(x.values()))
		if(self.tamInput > 1):
			matrix_x = matrix_x.T
		matrix_y = np.matrix(list(y.values()))
		# error to be backpropagated
		ek = -np.subtract(matrix_y, self.yEstimado)
		'''
		print("Erro k:")
		print(ek.shape)
		print(ek)
		print("Derivada sigmoid YIN : ", self.derivadaSigmoide(self.yin).shape)
		print(self.derivadaSigmoide(self.yin))
		'''
		delta3 = np.multiply(ek, self.derivadaPrelu(self.yin))#self.derivadaSigmoide(self.yin))
		# Get the error to backpropagate from each layer, multiplying by the derivative of the activation function
		# adding the regularization term to the gradient (+lambda * weights)
		dJdW2 = np.dot(delta3, self.zin) + self.lambdaVal*self.W2
		'''
		print("dJdW2 ------------ ", dJdW2.shape)
		print(dJdW2)
		print("Z shape:", self.z.shape)
		print(self.z)
		print("Derivada Z shape:", self.derivadaSigmoide(self.z).shape)
		print(self.derivadaSigmoide(self.z))
		print("W2 shape", self.W2.shape)
		print(self.W2)
		'''
		delta2 = np.multiply(np.dot(self.W2, delta3).T, self.derivadaSigmoide(self.z))
		dJdW1 = np.dot(matrix_x, delta2) + self.lambdaVal*self.W1
		return dJdW1, dJdW2
Example #18
    def test_chebyshev_center(self):
        # The goal is to find the largest Euclidean ball (i.e. its center and
        # radius) that lies in a polyhedron described by linear inequalities in
        # this fashion: P = {x : a_i'*x <= b_i, i=1,...,m} where x is in R^2

        # Generate the input data
        a1 = np.matrix("2; 1")
        a2 = np.matrix(" 2; -1")
        a3 = np.matrix("-1;  2")
        a4 = np.matrix("-1; -2")
        b = np.ones(4)

        # Create and solve the model
        r = Variable(name='r')
        x_c = Variable(2,name='x_c')
        obj = Maximize(r)
        constraints = [ #TODO have atoms compute values for constants.
            a1.T*x_c + np.linalg.norm(a1)*r <= b[0],
            a2.T*x_c + np.linalg.norm(a2)*r <= b[1],
            a3.T*x_c + np.linalg.norm(a3)*r <= b[2],
            a4.T*x_c + np.linalg.norm(a4)*r <= b[3],
        ]

        p = Problem(obj, constraints)
        result = p.solve()
        self.assertAlmostEqual(result, 0.447214)
        self.assertAlmostEqual(r.value, result)
        self.assertItemsAlmostEqual(x_c.value, [0,0])
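The same Chebyshev-center problem is a plain linear program in (x_c, r), so it can be cross-checked without cvxpy using scipy.optimize.linprog; a sketch under that assumption (linprog minimizes, so maximize r by minimizing -r):

import numpy as np
from scipy.optimize import linprog

A = np.array([[2., 1.], [2., -1.], [-1., 2.], [-1., -2.]])
b = np.ones(4)
norms = np.linalg.norm(A, axis=1)
# variables [x1, x2, r]; constraints a_i.x + ||a_i||*r <= b_i
res = linprog(c=[0, 0, -1],
              A_ub=np.column_stack([A, norms]),
              b_ub=b,
              bounds=[(None, None), (None, None), (0, None)])
print(res.x)   # approximately [0, 0, 0.447214]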
Example #19
def RMSE(mat_predict, mat_true):
	
	sha_predict = mat_predict.shape
	sha_true = mat_true.shape

	if sha_true != sha_predict:
		print('error! yay!')
		return 0

	summy = float(0.0)
	count = float(0.0)

	# set up the data frame for outputting
	predict_out = np.matrix([[0,0,0]])

	# you only care about the non-null values of mat_true
	for i in range(0,numusers):
		for j in range(0,nummovies):
			if mat_true[i,j] != 0:
				count = count + 1
				summy = summy + math.pow((mat_true[i,j] - mat_predict[i,j]),2)

				# add to the output matrix
				predict_out = np.vstack((predict_out,np.matrix([i+1,j+1,mat_predict.item(i,j)])))

	# complete the equation
	RMSE_value = math.pow(summy/count,0.5)

	# return it after deleting the first row etc
	predict_out = np.delete(predict_out,(0),axis = 0)
	
	return RMSE_value, predict_out
Example #20
def least_squares(data):
    """The least squares method."""
    x = NP.matrix([(a, 1) for (a, b) in data])
    xt = NP.transpose(x)
    y = NP.matrix([[b] for (a, b) in data])
    [a,c] = NP.dot(NP.linalg.inv(NP.dot(xt, x)), xt).dot(y).flat
    return (a, -1, c)
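A usage sketch for least_squares above, assuming NP is numpy (as the function body implies); it returns (a, -1, c) for the fitted line a*x - y + c = 0, illustrative data only:

import numpy as NP

data = [(0, 1.0), (1, 2.9), (2, 5.1), (3, 7.0)]   # roughly y = 2x + 1
a, _, c = least_squares(data)
print(a, c)   # slope near 2, intercept near 1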
Example #21
def window_fn_matrix(Q,N,num_remov=None,save_tag=None,lms=None):
    Q = n.matrix(Q); N = n.matrix(N)
    Ninv = uf.pseudo_inverse(N,num_remov=None) # XXX want to remove dynamically
    #print Ninv 
    info = n.dot(Q.H,n.dot(Ninv,Q))
    M = uf.pseudo_inverse(info,num_remov=num_remov)
    W = n.dot(M,info)

    if save_tag is not None:
        foo = W[0,:]
        foo = n.real(n.array(foo))
        foo.shape = (foo.shape[1]),
        print(foo.shape)
        p.scatter(lms[:,0],foo,c=lms[:,1],cmap=mpl.cm.PiYG,s=50)
        p.xlabel('l (color is m)')
        p.ylabel('W_0,lm')
        p.title('First Row of Window Function Matrix')
        p.colorbar()
        p.savefig('{0}/{1}_W.pdf'.format(fig_loc,save_tag))
        p.clf()

        print('W ', W.shape)
        p.imshow(n.real(W))
        p.title('Window Function Matrix')
        p.colorbar()
        p.savefig('{0}/{1}_W_im.pdf'.format(fig_loc,save_tag))
        p.clf()


    return W
Example #22
 def test_get_relative_transformation_pasteboard(self):
     """
     Test get_relative_transformation() relative to the pasteboard
     """
     self.assertTrue(numpy.all(
         doc.get_relative_transformation() == 
         numpy.identity(3)
     ))
     spread = doc.get_children('Spread')[1]
     self.assertTrue(numpy.all(
         spread.get_relative_transformation() == 
         numpy.matrix([
             [1, 0, 0],
             [0, 1, 0],
             [0, 1200.472440944882, 1],
         ])
     ))
     page_item = spread.get_children('TextFrame')[0]
     self.assertTrue(numpy.all(
         page_item.get_relative_transformation() == 
         numpy.matrix([
             [1, 0, 0],
             [0, 1, 0],
             [401.10236220472433, 941.96692913385834, 1],
         ])
     ))
Example #23
def findClosestPointInB(b_data, a, offset):

	xd = offset[0]
	yd = offset[1]
	theta = offset[2]

	T = numpy.matrix([	[math.cos(theta), -math.sin(theta), xd],
			[math.sin(theta), math.cos(theta), yd],
			[0.0, 0.0, 1.0]
		    ])


	a_hom = numpy.matrix([[a[0]],[a[1]],[1.0]])
	temp = T*a_hom
	a_off = [temp[0,0],temp[1,0]]

	minDist = 1e100
	minPoint = None

	for p in b_data:

		dist = math.sqrt((p[0]-a_off[0])**2 + (p[1]-a_off[1])**2)
		if dist < minDist:
			minPoint = copy(p)
			minDist = dist


	if minPoint is not None:
		return minPoint, minDist
	else:
		raise ValueError("no closest point found: b_data is empty")
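The 3x3 matrix T above is the standard 2-D homogeneous transform (rotation by theta followed by translation (xd, yd)); a tiny standalone check of that construction:

import math
import numpy

theta, xd, yd = math.pi / 2, 1.0, 2.0
T = numpy.matrix([[math.cos(theta), -math.sin(theta), xd],
                  [math.sin(theta),  math.cos(theta), yd],
                  [0.0, 0.0, 1.0]])
p = numpy.matrix([[1.0], [0.0], [1.0]])   # the point (1, 0) in homogeneous form
q = T * p
assert numpy.allclose(q[:2].T, [[1.0, 3.0]])  # (1,0) rotated 90 deg -> (0,1), then offset by (1,2)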
Example #24
def load_matlab_matrix( matfile, matname=None ):
    """
    Wraps scipy.io.loadmat.

    If matname provided, returns np.ndarray representing the index
    map. Otherwise, the full dict provided by loadmat is returned.
    """
    if not matname:
        out = spio.loadmat( matfile )
        mat = _extract_mat( out )
        # if mat is a sparse matrix, convert it to numpy matrix
        try:
            mat = np.matrix( mat.toarray() )
        except AttributeError:
            mat = np.matrix( mat )
        return mat
    else:
        matdict = spio.loadmat( matfile )
        mat = matdict[ matname ]
        # if mat is a sparse matrix, convert it to numpy matrix
        try:
            mat = np.matrix( mat.toarray() )
        except AttributeError:
            mat = np.matrix( mat )
        return mat #np.matrix( mat[ matname ] )
Example #25
 def __init__(self):
     self._position = numpy.zeros((2,))
     self._position_frozen = False
     self._matrix = numpy.matrix(numpy.identity(3, numpy.float64))
     self._temp_matrix = numpy.matrix(numpy.identity(3, numpy.float64))
     self._selected = False
     self._scene = None
Example #26
    def predict(self, x_star):
        """
        Predict the process's values on the input values

        @arg x_star: Prediction points

        @return: ( mean, variance, LL )
        where mean are the predicted means, variance are the predicted
        variances and LL is the log likelihood of the data for the given
        value of the parameters (i.e. not integrating over hyperparameters)
        """
        from numpy.linalg import solve
        import types
        # print 'Predicting'
        if 0 == len(self.X):
            f_star_mean = matrix(zeros((len(x_star), 1), numpy.float64))
            v = matrix(zeros((0, len(x_star)), numpy.float64))
        else:
            k_star = self.calc_covariance(self.X, x_star)
            f_star_mean = k_star.T * self._alpha
            if 0 == len(x_star):  # no prediction points
                v = matrix(zeros((0, len(x_star)), numpy.float64))
            else:
                v = solve(self._L, k_star)
        V_f_star = self.calc_covariance(x_star) - v.T * v
        # print 'Done predicting'
        # import IPython; IPython.Debugger.Pdb().set_trace()
        return (f_star_mean, V_f_star, self.LL)
Example #27
 def _update(self):
     """
     Calculate those terms for prediction that do not depend on predictive
     inputs.
     """
     from numpy.linalg import cholesky, solve, LinAlgError
     from numpy import transpose, eye, matrix
     import types
     self._K = self.calc_covariance(self.X)
     if not self._K.shape[0]:  # we didn't have any data
         self._L = matrix(zeros((0, 0), numpy.float64))
         self._alpha = matrix(zeros((0, 1), numpy.float64))
         self.LL = 0.
     else:
         try:
             self._L = matrix(cholesky(self._K))
         except LinAlgError as detail:
             raise RuntimeError("""Cholesky decomposition of covariance """
                                """matrix failed. Your kernel may not be positive """
                                """definite. Scipy complained: %s""" % detail)
         self._alpha = solve(self._L.T, solve(self._L, self.y))
         self.LL = (
             - self.n * math.log(2.0 * math.pi)
             - (self.y.T * self._alpha)[0, 0]
         ) / 2.0
     # print self.LL
     # import IPython; IPython.Debugger.Pdb().set_trace()
     self.LL -= log(diagonal(self._L)).sum()
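The two methods above are the standard Cholesky-based Gaussian-process regression algebra (alpha = K^{-1} y via two triangular solves, mean = k*^T alpha, covariance = K** - v^T v). A self-contained numpy sketch of the same computation, with an RBF kernel assumed purely for illustration:

import numpy as np

def rbf(A, B, ell=0.3):
    d2 = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
    return np.exp(-0.5 * d2 / ell ** 2)

X = np.random.rand(20, 1)
y = np.sin(3 * X).ravel() + 0.1 * np.random.randn(20)
Xs = np.linspace(0, 1, 50)[:, None]

K = rbf(X, X) + 1e-6 * np.eye(20)                    # jitter keeps Cholesky stable
L = np.linalg.cholesky(K)
alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))  # K^{-1} y
k_star = rbf(X, Xs)
mean = k_star.T @ alpha                              # predictive mean
v = np.linalg.solve(L, k_star)
cov = rbf(Xs, Xs) - v.T @ v                          # predictive covariance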
Example #28
def test_pascal_1():
    """Simple test of pascal matrix: k = 1."""
    # Notice we recover the unit matrix with n = 18, better than previous test
    n = 18
    a = rogues.pascal(n, 1)
    b = np.matrix(a) * np.matrix(a)
    assert(np.allclose(b, np.eye(n)))
Example #29
def main():

    sample='q'
    sm_bin='10.0_10.5'
    catalogue = 'sm_9.5_s0.2_sfr_c-0.75_250'

    #load in fiducial mock
    filepath = './'
    filename = 'sm_9.5_s0.2_sfr_c-0.8_Chinchilla_250_wp_fiducial_'+sample+'_'+sm_bin+'_cov.npy'
    cov = np.matrix(np.load(filepath+filename))
    diag = np.diagonal(cov)
    filepath = cu.get_output_path() + 'analysis/central_quenching/observables/'
    filename = 'sm_9.5_s0.2_sfr_c-0.8_Chinchilla_250_wp_fiducial_'+sample+'_'+sm_bin+'.dat'
    data = ascii.read(filepath+filename)
    rbins = np.array(data['r'])
    mu = np.array(data['wp'])
    
    #load in comparison mock
    
    
    
    
    plt.figure()
    plt.errorbar(rbins, mu, yerr=np.sqrt(np.diagonal(cov)), color='black')
    plt.plot(rbins, wp,  color='red')
    plt.xscale('log')
    plt.yscale('log')
    plt.show()
    
    inv_cov = cov.I
    Y = np.matrix((wp-mu))
    
    X = Y*inv_cov*Y.T
    
    print(X)
Example #30
def get_system_model():

    A = np.matrix([[DT, 1.0],
                   [0.0, DT]])
    B = np.matrix([0.0, 1.0]).T

    return A, B
Example #31
        # one square to the right
        if column < self.num_blocks-1: 
            new_col=column+1
            newSolution = np.copy(current)
            TargetBlock = newSolution[row,new_col]
            newSolution[row,new_col]=0
            newSolution[row,column] = TargetBlock
            newSolutions.append(newSolution)
       
        return  newSolutions


if __name__ == '__main__':
    
    #Creating a target for the game
    target=np.matrix([[1, 2, 3],[4, 5, 6],[7, 8, 0]])
    print('Target:\n%s'%target)
    
    start=np.matrix([[4, 6, 3],[1, 0, 8],[7, 2, 5]])
    #start=np.matrix([[1, 2, 3],[4, 5, 6],[7, 0, 8]])
    print('Start:\n%s'%start)
        
    #Creating a problem object based on the FindPath class
    Problema = SlidingPuzzle(3)
     
    #Creating an object for breadth first search algorithm for ``FindPath`` problem
    SearchObj = breadth_first_search(Problema)    
     
     
    #Finding solution
    solution,visited = SearchObj.search(start,target)
Example #32
Pc_in = PropsSI('P','T', flowInputs['Tc_in'], 'Q', flowInputs['xc_in'], opCond['FluidType'])


opCond['mfr_h'] = 20.0 #mfr_hGuess
opCond['mdot_h'] = opCond['mfr_h']/(geom['N']*math.pi*0.25*(geom['D']-2*geom['t'])**2)
opCond['mdot_c'] = opCond['mfr_c']/(geom['Nt_col']*geom['s']*geom['L'])

print(opCond['mdot_h'])


'''
#### Initialization ####
'''
np.set_printoptions(precision=2)
# Matrix allocation for every thermodynamical variable
Th = np.matrix([[0.0 for x in range(geom['n'] + 1)] for y in range(geom['Nt'] + 1)] )
Tc = np.matrix([[0.0 for x in range(geom['n'] + 1)] for y in range(geom['Nt'] + 1)] )
Ph = np.matrix([[1e5 for x in range(geom['n'] + 1)] for y in range(geom['Nt'] + 1)] )
Pc = np.matrix([[1e5 for x in range(geom['n'] + 1)] for y in range(geom['Nt'] + 1)] )
eps = np.matrix([[0.0 for x in range(geom['n'] + 1)] for y in range(geom['Nt'] + 1)] )
xc = np.matrix([[0.0 for x in range(geom['n'] + 1)] for y in range(geom['Nt'] + 1)] )
OtherData = np.matrix([[{} for x in range(geom['n'] + 1)] for y in range(geom['Nt'] + 1)] )


# Initialization of the first row and column
# Non-used cells in the matrix are set with '-1'
Tc[0,:] = flowInputs['Tc_in']
Tc[:,0] = -1
Th[:,0] = flowInputs['Th_in']
Th[0,:] = -1
Example #33
def ecol(data, correlated=None, err_cov=None, abs_est=True):
    """
    Extended collocation analysis to obtain estimates of:
        - signal variances
        - error variances
        - signal-to-noise ratios [dB]
        - error cross-covariances (and -correlations)
    based on an arbitrary number of N>3 data sets.

    !!! EACH DATA SET MUST BE A MEMBER OF >= 1 TRIPLET THAT FULFILLS THE CLASSICAL TRIPLE COLLOCATION ASSUMPTIONS !!!

    Parameters
    ----------
    data : pd.DataFrame
        Temporally matched input data sets in each column
    correlated : tuple of tuples (string)
        A tuple containing tuples of data set names (column names), between
        which the error cross-correlation shall be estimated.
        e.g. [['AMSR-E','SMOS'],['GLDAS','ERA']] estimates error cross-correlations
        between (AMSR-E and SMOS), and (GLDAS and ERA), respectively.
    err_cov :
        A priori known error cross-covariances that shall be included
        in the estimation (to obtain unbiased estimates)
    abs_est :
        Force absolute values for signal and error variance estimates
        (to mitigate the issue of estimation uncertainties)

    Returns
    -------
    A dictionary with the following entries (<name> corresponds to the data set (column) names):
    - sig_<name> : signal variance of <name>
    - err_<name> : error variance of <name>
    - snr_<name> : SNR (in dB) of <name>
    - err_cov_<name1>_<name2> : error covariance between <name1> and <name2>
    - err_corr_<name1>_<name2> : error correlation between <name1> and <name2>

    Notes
    -----
    Rescaling parameters can be derived from the signal variances
    e.g., scaling <src> against <ref>:
    beta =  np.sqrt(sig_<ref> / sig_<src>)
    rescaled = (data[<src>] - data[<src>].mean()) * beta + data[<ref>].mean()

    References
    ----------
    .. [Gruber2016] Gruber, A., Su, C. H., Crow, W. T., Zwieback, S., Dorigo, W. A., & Wagner, W. (2016). Estimating error
    cross-correlations in soil moisture data sets using extended collocation analysis. Journal of Geophysical
    Research: Atmospheres, 121(3), 1208-1219.
    """

    data.dropna(inplace=True)

    cols = data.columns.values
    cov = data.cov()

    # subtract a-priori known error covariances to obtain unbiased estimates
    if err_cov is not None:
        cov[err_cov[0]][err_cov[1]] -= err_cov[2]
        cov[err_cov[1]][err_cov[0]] -= err_cov[2]

    # ----- Building up the observation vector y and the design matrix A -----

    # Initial length of the parameter vector x:
    # n error variances + n signal variances
    n_x = 2 * len(cols)

    # First n elements in y: variances of all data sets
    y = cov.values.diagonal()

    # Extend y if data sets with correlated errors exist
    if correlated is not None:
        # additionally estimated in x:
        # k error covariances, and k cross-biased signal variances
        # (biased with the respective beta_i*beta_j)
        n_x += 2 * len(correlated)

        # add covariances between the correlated data sets to the y vector
        y = np.hstack((y, [cov[ds[0]][ds[1]] for ds in correlated]))

    # Generate the first part of the design matrix A (total variance = signal variance + error variance)
    A = np.hstack((np.matrix(np.identity(int(n_x / 2))),
                   np.matrix(np.identity(int(n_x / 2))))).astype('int')

    # build up A and y components for estimating signal variances (biased with beta_i**2 only)
    # i.e., the classical TC based signal variance estimators cov(a,c)*cov(a,d)/cov(c,d)
    for col in cols:

        others = cols[cols != col]
        combs = list(combinations(others, 2))

        for comb in combs:
            if correlated is not None:
                if check_if_biased(
                    [[col, comb[0]], [col, comb[1]], [comb[0], comb[1]]],
                        correlated):
                    continue

            A_line = np.zeros(n_x).astype('int')
            A_line[np.where(cols == col)[0][0]] = 1
            A = np.vstack((A, A_line))

            y = np.append(
                y,
                cov[col][comb[0]] * cov[col][comb[1]] / cov[comb[0]][comb[1]])

    # build up A and y components for the cross-biased signal variabilities (with beta_i*beta_j)
    # i.e., the cross-biased signal variance estimators (cov(a,c)*cov(b,d)/cov(c,d))
    if correlated is not None:
        for i in np.arange(len(correlated)):
            others = cols[(cols != correlated[i][0])
                          & (cols != correlated[i][1])]
            combs = list(permutations(others, 2))

            for comb in combs:
                if check_if_biased([[correlated[i][0], comb[0]],
                                    [correlated[i][1], comb[1]], comb],
                                   correlated):
                    continue

                A_line = np.zeros(n_x).astype('int')
                A_line[len(cols) + i] = 1
                A = np.vstack((A, A_line))

                y = np.append(
                    y, cov[correlated[i][0]][comb[0]] *
                    cov[correlated[i][1]][comb[1]] / cov[comb[0]][comb[1]])

    y = np.matrix(y).T

    # ----- Solving for the parameter vector x -----

    x = (A.T * A).I * A.T * y
    x = np.squeeze(np.array(x))

    # ----- Building up the result dictionary -----

    tags = np.hstack(('sig_' + cols, 'err_' + cols, 'snr_' + cols))

    if correlated is not None:

        # remove the cross-biased signal variabilities (with beta_i*beta_j) as they are not useful
        x = np.delete(x, np.arange(len(correlated)) + len(cols))

        # Derive error cross-correlations from error covariances and error variances
        for i in np.arange(len(correlated)):
            x = np.append(
                x, x[2 * len(cols) + i] / np.sqrt(
                    x[len(cols) + np.where(cols == correlated[i][0])[0][0]] *
                    x[len(cols) + np.where(cols == correlated[i][1])[0][0]]))

        # add error covariances and correlations to the result dictionary
        tags = np.append(
            tags,
            np.array(['err_cov_' + ds[0] + '_' + ds[1] for ds in correlated]))
        tags = np.append(
            tags,
            np.array(['err_corr_' + ds[0] + '_' + ds[1] for ds in correlated]))

    # force absolute signal and error variance estimates to compensate for estimation uncertainties
    if abs_est is True:
        x[0:2 * len(cols)] = np.abs(x[0:2 * len(cols)])

    # calculate and add SNRs (in decibel units)
    x = np.insert(x, 2 * len(cols),
                  10 * np.log10(x[0:len(cols)] / x[len(cols):2 * len(cols)]))

    return dict(zip(tags, x))
Example #34
 def condition_number(self):
     """Condition number of x; ratio of largest to smallest eigenvalue."""
     x = np.matrix(self.x)
     ev = np.linalg.eig(x.T * x)[0]
     return np.sqrt(ev.max() / ev.min())
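Since the eigenvalues of x^T x are the squared singular values of x, the value above equals the 2-norm condition number; a quick check against numpy on random data:

import numpy as np

x = np.random.rand(10, 4)
ev = np.linalg.eigvalsh(x.T @ x)   # eigenvalues of x^T x, ascending
assert np.isclose(np.sqrt(ev.max() / ev.min()), np.linalg.cond(x, 2))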
Example #35
import numpy as np
#import time

A = np.matrix('1 2 -2 1; 2 5 -2 3; -2 -2 5 3; 1 3 3 2')
b = np.matrix('4; 7; -1; 0')

def lu_decomposition(A):
    # if A.shape[0]!=A.shape[1]: implement exception

    dimension = A.shape[0]

    L = np.zeros(shape=(dimension,dimension))
    U = np.zeros(shape=(dimension,dimension))

    for i in range(dimension):
        L[i,i] = 1.0

    for i in range(dimension):
        for j in range(dimension):
            total = 0.0   # avoid shadowing the built-in sum

            if j >= i:
                for k in range(i+1):
                    total += L[i,k] * U[k,j]

                U[i,j] = A[i,j] - total
            else:
                for k in range(j+1):
                    total += L[i,k] * U[k,j]

                L[i,j] = (A[i,j] - total)/U[j,j]

    return L, U
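A short check of the factorization above (with the return L, U added at the end; note the routine has no pivoting, so it assumes no zero pivot arises, which holds for the matrix A defined above):

L, U = lu_decomposition(A)
assert np.allclose(L.dot(U), A)   # L is unit lower triangular, U upper triangular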
Example #36
# see the definition of class MonGraphe
# or alternatively
def Deg(numero):
    return len(graphe[numero].arcs)


print(graphe[0].Degre())
print('*****************')

###################################### Question 9 ################

n = len(graphe)  # the matrix will be n*n
import numpy as np
import scipy.linalg
M = np.matrix([[0 for i in range(0, n)] for j in range(0, n)
               ])  # build an n*n matrix of zeros


# define a function that returns the (i, j) coefficient
def laplacien(i, j):
    if i == j: return graphe[i].Degre()
    elif graphe[j] in graphe[i].arcs: return -1
    else: return 0


# fill in the matrix by applying this function
for i in range(0, n):
    for j in range(0, n):
        M[i, j] = laplacien(i, j)

# a few sanity checks
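Equivalently, for a graph given as a 0/1 adjacency matrix, the Laplacian is the degree matrix minus the adjacency matrix; a self-contained numpy sketch of the same construction:

import numpy as np

adj = np.array([[0, 1, 1],
                [1, 0, 0],
                [1, 0, 0]])                  # small undirected graph
laplacian = np.diag(adj.sum(axis=1)) - adj   # D - A
assert (laplacian.sum(axis=1) == 0).all()    # each row of a Laplacian sums to zero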
Example #37
import numpy as np
import pandas as pd
from itertools import chain
import matplotlib.pyplot as plt
import statsmodels.api as sm


# In[2]:

np.random.seed(6996)
Epsilon = np.random.randn(500)
X = np.random.normal(0, 2, (500, 500))  # this can also generate a 500*500 matrix of random numbers of normal distribution
# same as:
# X = np.random.normal(0,2,500*500)
# X = np.reshape(X, (500,500))
X = np.matrix(X)
slopesSet = np.random.uniform(1, 5, 500)
# Y = sapply(2:500,function(z) 1+X[,1:z]%*%slopesSet[1:z]+Epsilon)


# In[3]:

Y = np.array(list(map(lambda i: 1 + np.inner(X[:, :i + 1], slopesSet[:i + 1]) + Epsilon, range(500))))  # main function of Y
# since list(map()) gives us a list of n*1 matrices,
# we have to convert them into an array (matrix) without '[]'
# and delete the useless first column
Y = np.array(list(chain(*Y)))
Y = Y[1:].transpose()
Y.shape

Example #38
def getRigidTransformFromLandmarks(points_dest, points_src, constraints='Tx_Ty_Tz_Rx_Ry_Rz', verbose=0):
    """
    Compute affine transformation to register landmarks
    :param points_src:
    :param points_dest:
    :param constraints:
    :param verbose: 0, 1, 2
    :return: rotsc_matrix, translation_array, points_src_reg, points_src_barycenter
    """
    # TODO: check input constraints

    # initialize default parameters
    init_param = [0, 0, 0, 0, 0, 0, 1, 1, 1]
    # initialize parameters for optimizer
    init_param_optimizer = []
    # initialize dictionary to relate constraints index to dof
    dict_dof = {'Tx': 0, 'Ty': 1, 'Tz': 2, 'Rx': 3, 'Ry': 4, 'Rz': 5, 'Sx': 6, 'Sy': 7, 'Sz': 8}
    # extract constraints
    list_constraints = constraints.split('_')
    # loop across constraints and build initial_parameters
    for i in range(len(list_constraints)):
        init_param_optimizer.append(init_param[dict_dof[list_constraints[i]]])

    # launch optimizer
    # res = minimize(minimize_transform, x0=init_param_optimizer, args=(points_src, points_dest, constraints), method='Nelder-Mead', tol=1e-8, options={'xtol': 1e-8, 'ftol': 1e-8, 'maxiter': 10000, 'maxfev': 10000, 'disp': show})
    res = minimize(minimize_transform, x0=init_param_optimizer, args=(points_dest, points_src, constraints), method='Powell', tol=1e-8, options={'xtol': 1e-8, 'ftol': 1e-8, 'maxiter': 100000, 'maxfev': 100000, 'disp': verbose})
    # res = minimize(minAffineTransform, x0=initial_parameters, args=points, method='COBYLA', tol=1e-8, options={'tol': 1e-8, 'rhobeg': 0.1, 'maxiter': 100000, 'catol': 0, 'disp': show})
    # loop across constraints and update dof
    dof = init_param
    for i in range(len(list_constraints)):
        dof[dict_dof[list_constraints[i]]] = res.x[i]
    # convert dof to more intuitive variables
    tx, ty, tz, alpha, beta, gamma, scx, scy, scz = dof[0], dof[1], dof[2], dof[3], dof[4], dof[5], dof[6], dof[7], dof[8]
    # convert results to intuitive variables
    # tx, ty, tz, alpha, beta, gamma, scx, scy, scz = res.x[0], res.x[1], res.x[2], res.x[3], res.x[4], res.x[5], res.x[6], res.x[7], res.x[8]
    # build translation matrix
    translation_array = np.matrix([tx, ty, tz])
    # build rotation matrix
    rotation_matrix = np.matrix([[np.cos(alpha) * np.cos(beta), np.cos(alpha) * np.sin(beta) * np.sin(gamma) - np.sin(alpha) * np.cos(gamma), np.cos(alpha) * np.sin(beta) * np.cos(gamma) + np.sin(alpha) * np.sin(gamma)],
                              [np.sin(alpha) * np.cos(beta), np.sin(alpha) * np.sin(beta) * np.sin(gamma) + np.cos(alpha) * np.cos(gamma), np.sin(alpha) * np.sin(beta) * np.cos(gamma) - np.cos(alpha) * np.sin(gamma)],
                              [-np.sin(beta), np.cos(beta) * np.sin(gamma), np.cos(beta) * np.cos(gamma)]])
    # build scaling matrix
    scaling_matrix = np.matrix([[scx, 0.0, 0.0], [0.0, scy, 0.0], [0.0, 0.0, scz]])
    # compute rotation+scaling matrix
    rotsc_matrix = scaling_matrix * rotation_matrix
    # compute center of mass from moving points (src)
    points_src_barycenter = np.mean(points_src, axis=0)
    # apply transformation to moving points (src)
    points_src_reg = ((rotsc_matrix * (np.matrix(points_src) - points_src_barycenter).T).T + points_src_barycenter) + translation_array

    logger.info(f"Matrix:\n {rotation_matrix}")
    logger.info(f"Center:\n {points_src_barycenter}")
    logger.info(f"Translation:\n {translation_array}")

    if verbose == 2:
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D

        fig = plt.figure()
        ax = fig.gca(projection='3d')
        points_src_matrix = np.matrix(points_src)
        points_dest_matrix = np.matrix(points_dest)

        number_points = len(points_dest)

        ax.scatter([points_dest_matrix[i, 0] for i in range(0, number_points)],
                   [points_dest_matrix[i, 1] for i in range(0, number_points)],
                   [points_dest_matrix[i, 2] for i in range(0, number_points)], c='g', marker='+', s=500, label='dest')
        ax.scatter([points_src_matrix[i, 0] for i in range(0, number_points)],
                   [points_src_matrix[i, 1] for i in range(0, number_points)],
                   [points_src_matrix[i, 2] for i in range(0, number_points)], c='r', label='src')
        ax.scatter([points_src_reg[i, 0] for i in range(0, number_points)],
                   [points_src_reg[i, 1] for i in range(0, number_points)],
                   [points_src_reg[i, 2] for i in range(0, number_points)], c='b', label='src_reg')
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        ax.set_aspect('auto')
        plt.legend()
        # plt.show()
        plt.savefig('getRigidTransformFromLandmarks_plot.png')

        fig2 = plt.figure()
        plt.plot(sse_results)
        plt.grid()
        plt.title('#Iterations: ' + str(res.nit) + ', #FuncEval: ' + str(res.nfev) + ', Error: ' + str(res.fun))
        plt.show()
        plt.savefig(os.path.join('getRigidTransformFromLandmarks_iterations.png'))

    # transform numpy matrix to list structure because it is easier to handle
    points_src_reg = points_src_reg.tolist()

    return rotsc_matrix, translation_array, points_src_reg, points_src_barycenter
Example #39
def main():
    # Open a jpg image file; note the path is relative to the current directory:
    #img_dir = " /Users/williammeng/Documents/Machine Learning/Shopee competition/Training Images "
    img_dir = os.path.abspath('..')
    BabyBibs_dir = img_dir + '/Training Images/BabyBibs/BabyBibs_300.jpg'
    im = Image.open(BabyBibs_dir)
    #imageRotate(-2)
    #ImageSave(im)
    # resize the image
    #im.resize((imageWidth, imageHeight),Image.ANTIALIAS)
    im = im.convert('L')  # 'L' is grayscale, '1' is black-and-white
    #new_im = im.filter(ImageFilter.CONTOUR)
    #new_im.show()
    im = scipy.misc.imresize(im, (imageHeight, imageHeight))
    #data = im.getdata()  # flatten to a 1-D matrix
    #data = np.asarray(im,np.float32)
    #data = np.matrix(new_im,dtype='float')
    #convert matrix to image
    #imgk = Image.fromarray(data)
    #imgk.show()

    #data = np.reshape(data,(imageHeight, imageWidth))  # reshape to a 2-D matrix
    #np.save('B_1.npy',data)
    #new_data = np.load('B_1.npy')
    im = invert(im)
    #clearNoise(im,4,data)  # denoise
    #data=np.where(data > 120, 1, 0)  # binarize
    #toArray(data)
    #new_im, newdata = corrosion(data)  # erosion
    #final_img_list = carve(new_im, newdata)  # split the image into digit tiles
    '''Convert the matrix back to an image'''
    new_im = Image.fromarray(im)  # if the matrix holds only 0/1, multiply data by 255
    # 显示图片
    #new_im.show()
    '''Compare the image against the reference digit images (0-9); the highest similarity gives the result.'''
    scoure = []
    for i in range(10):
        #cut_img.clear()
        #cut_img=Image.open(str(i)+'.jpg')
        cut_img = final_img_list[i]
        for j in range(10):
            #num_img.clear()
            num_img = Image.open(str(j) + '.png')
            cut_img = cut_img.convert('1')
            num_img = num_img.convert('1')

            width = min(cut_img.width, num_img.width)
            height = min(cut_img.height, num_img.height)
            #print("a",width,height,cut_img.width,num_img.width,cut_img.height,num_img.height)
            cut_img = cut_img.resize((width, height))  # unlike thumbnail, resize returns a new image
            num_img = num_img.resize((width, height))
            #print(width,cut_img.width,num_img.width,height,cut_img.height,num_img.height)
            cut_img.save(str(i) + str(j) + str(0) + '.jpg', 'jpeg')
            num_img.save(str(i) + str(j) + str(1) + '.jpg', 'jpeg')
            count = 0
            for m in range(0, width):
                for n in range(0, height):
                    #print(m,n,width,height,cut_img.width,num_img.width,cut_img.height,num_img.height)
                    if (cut_img.getpixel((m, n)) == num_img.getpixel((m, n))
                            and num_img.getpixel((m, n)) == 0):
                        count = count + 1
            scoure.append(count)
    scoure = np.matrix(scoure).reshape((10, 10))  # reshape into a 10x10 matrix
    for i in range(10):
        print(scoure[i])
    print(scoure.argmax(axis=1))
Example #40
def well_pull():
	try:
		connection = pyodbc.connect(r'Driver={SQL Server Native Client 11.0};'
									r'Server=10.75.6.160, 1433;'
									r'Database=OperationsDataMart;'
									r'trusted_connection=yes'
									)
	except pyodbc.Error:
		print("Connection Error")
		sys.exit()

	cursor = connection.cursor()
	SQLCommand = ("""
		DROP TABLE IF EXISTS #Normal
		DROP TABLE IF EXISTS #Plunger

		SELECT	P.Wellkey
				,DP.WellFlac
				,W.API
				,DP.BusinessUnit
				,DP.DateKey
				,DP.AllocatedOil
				,DP.AllocatedGas
				,DP.AllocatedWater
				,P.LastChokeEntry
				,P.LastChokeStatus
			INTO #Normal
			FROM [Business].[Operations].[DailyProduction] DP
			JOIN [OperationsDataMart].[Dimensions].[Wells] W
			  ON W.WellFlac = DP.WellFlac
			JOIN [OperationsDataMart].[Facts].[Production] P
			  ON P.Wellkey = W.Wellkey
			  AND P.DateKey = DP.DateKey
			WHERE P.LastChokeStatus = 'Normal Operations'
			  AND DP.DateKey >= DATEADD(year, -2, GETDATE())
			  --AND DP.AllocatedGas > 0
			  --AND DP.AllocatedOil > 0
			  --AND DP.AllocatedWater > 0
	""")

	cursor.execute(SQLCommand)

	SQLCommand = ("""
		SELECT DISTINCT N.API
						,N.Wellkey
						,N.WellFlac
						,N.BusinessUnit
						,CASE WHEN AvGas.AvgGas IS NOT NULL
							  THEN AvGas.AvgGas
							  ELSE 0 END AS AvgGas
						,CASE WHEN N.BusinessUnit = 'North'
								THEN (LG.Weighted_OilGasRatio + LG.Weighted_WaterGasRatio)
								ELSE ((CASE WHEN AvOil.AvgOil IS NOT NULL
											  THEN AvOil.AvgOil
											  ELSE 0 END + CASE WHEN AvWat.AvgWater IS NOT NULL
																  THEN AvWat.AvgWater
																  ELSE 0 END) / CASE WHEN AvGas.AvgGas IS NOT NULL
																				  THEN AvGas.AvgGas
																				  ELSE NULL END) END AS LGR
						,CASE WHEN PT.plungerType LIKE '%Stock%' OR PT.plungerType LIKE '%stock%' OR
									PT.plungerType LIKE '%Cleanout%' OR PT.plungerType LIKE '%Snake%' OR
									PT.plungerType LIKE '%Venturi%' OR PT.plungerType LIKE '%Viper%' OR PT.plungerType LIKE '%Vortex%'
								THEN 'Bar Stock'
								WHEN PT.plungerType LIKE '%acemaker%' OR PT.plungerType LIKE '%Center%' OR
									PT.plungerType LIKE '%ypass%' OR PT.plungerType LIKE '%Sleeve%' OR
									PT.plungerType LIKE '%y-pass%'
								THEN 'Pacemaker/Bypass'
								WHEN PT.plungerType LIKE '%Other%' OR PT.plungerType LIKE '%6%' OR
									PT.plungerType LIKE '%8%' OR PT.plungerType LIKE '%Brush%' OR
									PT.plungerType LIKE '%Sphere%'
								THEN 'Other'
								WHEN PT.plungerType IS NULL
								THEN NULL
								ELSE 'Padded' END AS PlungerType
			--INTO #Plunger
			FROM (SELECT	API
						,Wellkey
						,WellFlac
						,BusinessUnit
						--,(AVG(AllocatedOil) + AVG(AllocatedWater)) / AVG(AllocatedGas) AS LGR
					FROM #Normal
					GROUP BY API, Wellkey, BusinessUnit, WellFlac) N
			LEFT OUTER JOIN (SELECT  API
						,AVG(AllocatedGas) AS AvgGas
					FROM #Normal
					WHERE AllocatedGas > 0
					GROUP BY API) AvGas
			  ON AvGas.API = N.API
			LEFT OUTER JOIN (SELECT	API
									,AVG(AllocatedOil) AS AvgOil
							FROM #Normal
							WHERE AllocatedOil > 0
							GROUP BY API) AvOil
			  ON AvOil.API = N.API
			LEFT OUTER JOIN (SELECT  API
						,AVG(AllocatedWater) AS AvgWater
				FROM #Normal
				WHERE AllocatedWater > 0
				GROUP BY API) AvWat
			  ON AvWat.API = N.API
			LEFT OUTER JOIN [TeamOptimizationEngineering].[dbo].[NorthLGR4] LG
			ON LG.WellKey = N.Wellkey
			LEFT OUTER JOIN (SELECT	A.apiNumber
									,P.plungerManufacturer
									,P.plungerType
								FROM [EDW].[Enbase].[PlungerInspection] P
								JOIN [EDW].[Enbase].[Asset] A
								  ON A._id = P.assetId
								INNER JOIN (SELECT	A.apiNumber
													,MAX(PlI.createdDate) AS MaxDate
											FROM [EDW].[Enbase].[PlungerInspection] PlI
											JOIN [EDW].[Enbase].[Asset] A
											  ON A._id = PlI.assetId
											GROUP BY A.apiNumber) MaxP
								  ON MaxP.apiNumber = A.apiNumber
								  AND MaxP.MaxDate = P.createdDate
								WHERE A.assetType = 'Well') PT
			ON LEFT(PT.apiNumber, 10) = N.API
			--WHERE PT.plungerType IS NOT NULL
			WHERE N.Wellkey IN (SELECT	MAX(DISTINCT(Wellkey))
								FROM #Normal
								GROUP BY API)
	""")

	cursor.execute(SQLCommand)
	results = cursor.fetchall()

	df = pd.DataFrame.from_records(results)
	connection.close()

	try:
		df.columns = pd.DataFrame(np.matrix(cursor.description))[0]
		df.drop_duplicates(inplace=True)
	except Exception:
		df = None
		print('Dataframe is empty')

	df.loc[:,'API'] = df.loc[:,'API'].astype(float)

	return df
Example #41
def saveSummaryResult(outfile, result, timeinfo, para):
    fileID = open(outfile + '.txt', 'w')
    print ('Average result: [%s]'%outfile)
    print('Metrics:', para['metrics'])
    fileID.write('======== Results summary ========\n')
    fileID.write('Metrics:    ')
    for metric in para['metrics']:
        fileID.write('|   %s  '%metric)
    fileID.write('\n')
    fileID.write('[Average]\n')
    
    k = 0
    for den in para['density']:
        den_result = result[k, :, :, :]
        evalResults = np.average(den_result, axis=2)
        fileID.write('density=%.2f: '%den)
        avgResult = np.average(evalResults, axis=0)
        np.savetxt(fileID, np.matrix(avgResult), fmt='%.4f', delimiter='  ')
        print('density=%.2f: ' % den, avgResult)
        k += 1

    fileID.write('\n[Standard deviation (std)]\n')
    k = 0
    for den in para['density']:
        den_result = result[k, :, :, :]
        evalResults = np.average(den_result, axis=2)
        fileID.write('density=%.2f: '%den)
        np.savetxt(fileID, np.matrix(np.std(evalResults, axis=0)), fmt='%.4f', delimiter='  ')
        k += 1

    fileID.write('\n======== Detailed results ========\n')
    k = 0
    for den in para['density']:
        den_result = result[k, :, :, :]
        fileID.write('[density=%.2f, %2d rounds]\n'%(den, para['rounds']))
        np.savetxt(fileID, np.matrix(np.average(den_result, axis=2)), fmt='%.4f', delimiter='  ')
        fileID.write('\n')
        k += 1
    k = 0
    for den in para['density']:
        den_result = result[k, :, :, :]
        fileID.write('[density=%.2f, %2d slices]\n'%(den, result.shape[3]))
        np.savetxt(fileID, np.matrix(np.average(den_result, axis=0).T), fmt='%.4f', delimiter='  ')
        fileID.write('\n')
        k += 1
    fileID.close()

    if para['saveTimeInfo']:
        fileID = open(outfile + '_time.txt', 'w')
        fileID.write('======== Summary ========\n')
        fileID.write('Average running time (second):\n')
        k = 0
        for den in para['density']:
            den_time = timeinfo[k, :, :]
            timeResults = np.average(den_time, axis=1)
            fileID.write('density=%.2f: '%den)
            np.savetxt(fileID, np.matrix(np.average(timeResults)), fmt='%.4f', delimiter='  ')
            k += 1
        
        fileID.write('\n======== Details ========\n')
        k = 0
        for den in para['density']:
            den_time = timeinfo[k, :, :]
            fileID.write('[density=%.2f, %2d slices]\n'%(den, timeinfo.shape[2]))
            np.savetxt(fileID, np.matrix(np.average(den_time, axis=0)).T, fmt='%.4f', delimiter='  ')
            fileID.write('\n')
            k += 1

        fileID.close()
Example #42
Matrices - Element-by-element operations - Multiplication and division

Example 3.12

Author: Guilherme A. Barucke Marcondes
"""

# Import the numpy library, which provides matrix support.
# The matrix function is used to define matrices.
from numpy import matrix

# Clear the console area to make the result easier to see.
print('\n' * 100)

# Create matrix A with integer (int) values.
matriz_A = matrix([[7, 2, 4], [3, 5, 9], [1, 6, 8]])

print('Matrix A')
print(matriz_A)

# Multiply every element of matrix A by a value (3).
matriz_B = matriz_A * 3

print('\n')
print('All elements of matrix A multiplied by 3.')
print(matriz_B)

# Divide every element of matrix A by 2.
matriz_B = matriz_A / 2

# Since all the values are integers, the result
Example #43
    def ComputeCoeff(self, x1, x2, y):

        a, b, c = symbols("a b c", real=True)

        F = [0] * 3  # Function Matrix (number of functions)

        length = len(F)
        jacabMatrix = [[0 for x in range(length)] for y in range(length)]

        # Defining Functions

        mainFunc = (y[0] - a * (b**x1[0]) * (c**x2[0]))**2

        for i in range(1, 60):
            mainFunc = mainFunc + (y[i] - a * (b**x1[i]) * (c**x2[i]))**2

        # Diff by a
        F[0] = diff(mainFunc, a)
        # Diff by b
        F[1] = diff(mainFunc, b)
        # Diff by c
        F[2] = diff(mainFunc, c)

        for i in range(0, len(F)):
            # Diff by a
            jacabMatrix[i][0] = diff(F[i], a)
            # Diff by b
            jacabMatrix[i][1] = diff(F[i], b)
            # Diff by c
            jacabMatrix[i][2] = diff(F[i], c)

        # Newton's method: solve grad(mainFunc) = 0 iteratively
        iterations = 0
        jacabMatrixVal = [[0 for _ in range(length)] for _ in range(length)]
        FVal = [0] * length

        root = [1.5, 1.5, 1.5]  # Initial guess
        err = 100

        while err > (10**(-2)):

            print("*******************************************")
            print("Iteration", iterations)
            print("*******************************************")

            # get the old root
            old_root = np.array(root).astype(
                np.float64)  # you may need to convert root matrix to list

            # Compute Hessian Matrix
            for i in range(0, length):
                for j in range(0, length):
                    jacabMatrixVal[i][j] = jacabMatrix[i][j].subs([
                        (a, root[0]), (b, root[1]), (c, root[2])
                    ])

            # Compute Function Matrix
            for i in range(0, length):
                FVal[i] = F[i].subs([(a, root[0]), (b, root[1]), (c, root[2])])

            # Convert lists to matrices
            jacabMatrixVal = np.asarray(jacabMatrixVal)
            FVal = np.asarray(FVal)

            # compute Hessian Inverse
            jacabMatrixInv = inv(np.matrix(jacabMatrixVal, dtype="float"))

            # Compute new roots
            root = root - jacabMatrixInv.dot(FVal)

            # keep a float copy of the updated root for the error computation

            root_norm = np.array(root).astype(np.float64)

            # compute Error
            err = abs(
                np.linalg.norm(root_norm - old_root) /
                np.linalg.norm(old_root))
            root = root.tolist()[0]
            print("Result root : ", root)
            print("err : ", err)
            iterations = iterations + 1
            # Break at 10 iterations
            if iterations == 10:
                break

        return root[0], root[1], root[2]
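The loop above is a plain Newton iteration on the gradient: with F the gradient of the squared-error objective and jacabMatrix its Jacobian (i.e. the Hessian), each step solves root_new = root - J(root)^-1 * F(root). A self-contained sketch of the same pattern on a smaller system, assuming sympy and numpy (the two-equation system here is illustrative, not the fitted model):

import numpy as np
from numpy.linalg import inv
from sympy import symbols, diff

x, y = symbols("x y", real=True)
F = [x**2 + y**2 - 4, x - y]                   # residual functions
J = [[diff(f, v) for v in (x, y)] for f in F]  # Jacobian of F

root = np.array([1.0, 1.0])
for _ in range(10):
    subs = [(x, root[0]), (y, root[1])]
    Fv = np.array([float(f.subs(subs)) for f in F])
    Jv = np.array([[float(e.subs(subs)) for e in row] for row in J])
    step = inv(Jv).dot(Fv)
    root = root - step
    if np.linalg.norm(step) / np.linalg.norm(root) < 1e-2:
        break
print(root)  # approaches (sqrt(2), sqrt(2))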
xLengthGraph = 10
Xlegend = "Threshold"
TotalIter = [0 for a3 in range(xLengthGraph)]
changeVar = [0 for a4 in range(xLengthGraph)]
WinnerGraph = [0 for i in range(xLengthGraph)]
winnerVotes = [0 for i2 in range(len(typeOfVotes))]
Opinions2 = {}

createCom(numberOfCom,MinPeople, MaxPeople)
probMatrix(numberOfCom,probs,MinFriendsIn,MaxFriendsIn)

print("size of Coum :")
print(sizes)

print("probs :")
print(np.matrix(probs))

BlockGraph = nx.stochastic_block_model(sizes, probs, seed=364)
edges = nx.edges(BlockGraph)
friends = [{-1} for j in range(len(BlockGraph))]

Opinions = creatOpinions(BlockGraph, typeOfVotes, Opinions2, winnerVotes)
winnerStart = getWinner(winnerVotes, typeOfVotes)
votes = [[0 for i in range(len(typeOfVotes))] for j in range(len(friends))]
votes2 = [[0 for i in range(len(typeOfVotes))] for j in range(len(friends))]

prints(typeOfVotes, winnerVotes, winnerStart, Opinions2, edges)

setFridends(edges, friends)

print("friends :")
Example #45
0
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

'''
PolynomialFeatures(degree=2, interaction_only=False, include_bias=True)

'''

data = [[1,5],
        [2,6],
        [3,7],
        [4,8]]

print('original data')
print(np.matrix(data))
print("---------------------------------------------")

dataScaling = PolynomialFeatures(degree=2, interaction_only=False, include_bias=True)
new_data = dataScaling.fit_transform(data)

print('data after scaling')
print(new_data)
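For reference, the column layout PolynomialFeatures produces for a two-feature row [a, b] at degree=2 with include_bias=True is [1, a, b, a^2, a*b, b^2]; a one-row check:

from sklearn.preprocessing import PolynomialFeatures

print(PolynomialFeatures(degree=2).fit_transform([[2, 6]]))
# [[ 1.  2.  6.  4. 12. 36.]]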
def get_landmarks(im):
    rects = cascade.detectMultiScale(im, 1.3, 5)  # face detection
    x, y, w, h = rects[0]  # the first face: top-left corner (x, y), width w and height h
#     print(x, y, w, h)
    rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
    return np.matrix([[p.x, p.y] for p in predictor(im, rect).parts()])
Example #47
0
 def RTz(self, angle, tx, ty, tz):
     RT = np.matrix([[math.cos(angle), -math.sin(angle), 0, tx],
                     [math.sin(angle),  math.cos(angle), 0, ty],
                     [0, 0, 1, tz],
                     [0, 0, 0, 1]])
     return RT
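A quick numeric check of the homogeneous transform above (rotation about z, then translation), assuming numpy and math:

import math
import numpy as np

angle, tx, ty, tz = math.pi / 2, 1.0, 2.0, 3.0
RT = np.matrix([[math.cos(angle), -math.sin(angle), 0, tx],
                [math.sin(angle),  math.cos(angle), 0, ty],
                [0, 0, 1, tz],
                [0, 0, 0, 1]])
p = np.matrix([[1.0], [0.0], [0.0], [1.0]])  # homogeneous point (1, 0, 0)
print(RT * p)  # rotates to (0, 1, 0), then translates to (1, 3, 3)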
Example #48
0
"""
Numpy_3
"""
import numpy as np
v1 = np.arange(5)
print('Print v1:')
print(v1)  # [0 1 2 3 4]
# Add 2 to each element in turn
print('Print v1+2:')
print(v1 + 2)  # [2 3 4 5 6]
# v1 * v1 multiplies the arrays element by element
print('Print v1*v1:')
print(v1 * v1)  # [0 1 4 9 16]
# 5x2 matrix of random integers drawn from [1, 10)
v2 = np.random.randint(1, 10, (5, 2))
print('Print v2:')
print(v2)
# v1 and v2 cannot be multiplied element-wise directly - why?
# (the shapes (5,) and (5, 2) do not broadcast)
# print(v1*v2)
# 1. Use the dot function for matrix multiplication; verify the result by hand
v3 = np.dot(v1, v2)
print('Result of matrix multiplication using dot:')
print(v3)
# Print the "shape" of the matrix, i.e. its row/column dimensions
print("Print v3's shape:")
print(v3.shape)
# 2. Convert to matrix objects, then multiply
v3 = np.matrix(v1) * np.matrix(v2)
print('Result of matrix multiplication using matrix objects:')
print(v3)
Example #49
0
 def project_wc_to_cc(self, xw, yw, zw):
     vector_wc = np.matrix([xw, yw, zw, 1]).T
     vector_cc = self.matrix_H34 * vector_wc
     xp = vector_cc[0,0] / vector_cc[2,0]
     yp = vector_cc[1,0] / vector_cc[2,0]
     return (xp, yp)
Example #50
0
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from statsmodels.stats.outliers_influence import variance_inflation_factor

# %%
# VIF
df=pd.DataFrame({
        'a':[1.2,2.1,3,3.9,5.2,6.1],
        'b':[6,5,4,3,2,1],
        'c':[11,53,29,64,12,1],
        'd':[32.2,34.1,36.05,38.3,40.1,42.2],
        'e':[73.1,75.3,77.1,79.2,80.8,83.1]
    })

df['f']=[1 for j in range(len(df))]
name = df.columns
x = np.matrix(df)
VIF_list = [variance_inflation_factor(x,i) for i in range(x.shape[1])]
VIF = pd.DataFrame({'feature':name,"VIF":VIF_list})
VIF = VIF.drop(VIF[VIF.feature=='f'].index)

print(VIF)
print(df.describe())
sns.pairplot(df)
plt.savefig('test_vif.png')
plt.show()
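What variance_inflation_factor reports for column i is VIF_i = 1 / (1 - R_i^2), where R_i^2 comes from regressing column i on all of the other columns; the constant column 'f' is appended because the statsmodels helper does not add an intercept on its own. A hand-rolled cross-check, assuming numpy and the df built above:

import numpy as np

def vif_by_hand(X, i):
    # Regress column i on the remaining columns (which include the constant).
    y = X[:, i]
    others = np.delete(X, i, axis=1)
    beta, *_ = np.linalg.lstsq(others, y, rcond=None)
    resid = y - others @ beta
    r2 = 1.0 - resid.var() / y.var()
    return 1.0 / (1.0 - r2)

print(vif_by_hand(df.to_numpy(), 0))  # should agree with VIF_list[0] ('a')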

# %%
# Pearson

df=pd.DataFrame({
        'a':[1.2,2.1,3,3.9,5.2,6.1],
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
import numpy as np
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file

mem = Memory("./mycache4")

@mem.cache
def get_data(data):
    my_data = load_svmlight_file(data)
    return my_data[0], my_data[1]

X_train, y_train = get_data("E2006.train.bz2") # X_train: 16087 x 150360
X_test, y_test = get_data("E2006.test.bz2")

y_test = np.matrix(y_test)
y_test = np.transpose(y_test)


X_train = X_train[:, :-2] # 16087 x 150358
y_train = np.matrix(y_train) # 16087 x 1
y_train = np.transpose(y_train)

reg = linear_model.Ridge(max_iter = 50, alpha = 1)
y_pred = reg.fit(X_train, y_train)
y_pred = y_pred.coef_

y_true = reg.fit(X_test, y_test)
y_true = y_true.coef_

print("pred:", y_pred)
Example #52
0
import numpy as np

node_1=np.array([2,0,0])
node_2 = np.array([2,0,-1])
node_3 = np.array([2,1,0])

uv_1 = node_2 - node_1
uv_1_mag = np.linalg.norm(uv_1)
UV_1 = uv_1 / uv_1_mag

uv_2 = node_3 - node_1
uv_2_mag = np.linalg.norm(uv_2)
UV_2 = uv_2 / uv_2_mag

UV_3 = np.cross(uv_1, uv_2)

T = np.matrix([[UV_1[0], UV_2[0], UV_3[0], node_1[0]],
               [UV_1[1], UV_2[1], UV_3[1], node_1[1]],
               [UV_1[2], UV_2[2], UV_3[2], node_1[2]],
               [0, 0, 0, 1]])

H = np.linalg.inv(T)

test_v = np.array([[3],[0.5],[0],[1]])

result = H*test_v

print("placeholder")
Example #53
0
re_draw.canvas.show()
re_draw.canvas.get_tk_widget().grid(row=0, columnspan=3)

Label(root, text='toln').grid(row=1, column=0)
# widget input
toln_entry = Entry(root)
toln_entry.grid(row=1, column=1)
toln_entry.insert(0, '10')

Label(root, text='tols').grid(row=2, column=0)
tols_entry = Entry(root)
tols_entry.grid(row=2, column=1)
tols_entry.insert(0, '1.0')

# widget button
Button(root, text='ReDraw', command=draw_new_tree).grid(row=1,
                                                        column=2,
                                                        rowspan=3)

# widget checkbutton
chk_btn_var = IntVar()
chk_btn = Checkbutton(root, text='Model tree', variable=chk_btn_var)
chk_btn.grid(row=3, column=0, columnspan=2)

re_draw.raw_dat = np.matrix(regression_CART.load_data('sine.txt'))
re_draw.test_dat = np.arange(min(re_draw.raw_dat[:, 0]),
                             max(re_draw.raw_dat[:, 0]), 0.01)
re_draw(1.0, 10)

root.mainloop()
Example #54
0
    0.94102916239858159, 0.94523004181126224, 0.95604994438455537,
    0.96196524827024299, 0.97572104193998899, 0.98250173726301571,
    0.98459481759219614, 0.98582439568368907, 0.98603928188540468,
    0.98930195905662144, 0.98930195905662144, 0.98924939797415179,
    0.99121023294912192, 0.99145779696675296, 0.99105606885336761,
    0.98996399375281807, 0.99080654523775913, 0.99080654523775913,
    0.99080654523775913, 0.99137841949463501, 0.99250206095183258,
    0.99284211959118029, 0.99287701929042438, 0.99289411857097043,
    0.99289411857097043, 0.99289411857097043, 0.99289411857097043,
    0.99289411857097043, 0.99289411857097043, 0.99289411857097043,
    0.99289411857097043, 0.99289411857097043, 0.99289411857097043,
    0.99289411857097043, 0.99289411857097043, 0.99289411857097043,
    0.99289411857097043, 0.99289411857097043
])

average_potential = list(np.mean(np.matrix(fscore_potential), axis=0).A1)
label_potential = [
    4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 50, 60, 70, 80, 90, 100, 110, 120,
    130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270,
    280, 290, 300, 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, 410, 420,
    430, 440, 450, 460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570,
    580, 590, 600, 610, 620, 630, 640
]

fscore_potential_hist_nochange = []
fscore_potential_hist_nochange.append([
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.19636241388161374,
    0.22969610398107057, 0.38307648356531809, 0.49293426498574872,
    0.60735727997732469, 0.72961502802537359, 0.73320045118126065,
    0.7429067026680094, 0.76311627793035686, 0.77680086943248139,
    0.78130788231369608, 0.81026003128417101, 0.81931200788125191,
    for i in range(len(Y_test)):
        if (yhat[i] < 0.5 and Y_test[i] == 0) or (yhat[i] > 0.5
                                                  and Y_test[i] == 1):
            acc += 1
    acc = acc / len(Y_test)

    print(acc, 'correct.')

    accuracies.append(acc)

    #######################################
    # Do training via gradient descent
    # for all the training data
    #######################################
    for i in range(len(X_train)):
        yhat = nn.forward(X_train[i])
        # Y_train is made a matrix
        nn.backward(np.matrix(Y_train[i]), yhat)

plt.plot(accuracies)
titleStr = 'Two-class training with ' + str(len(num_neurons) -
                                            1) + ' hidden layer'
if len(num_neurons) > 2:
    titleStr += 's'
if beta != 0.0:
    titleStr += ' and momentum'
plt.title(titleStr)
plt.xlabel('Iteration')
plt.ylabel('Proportion correct')
plt.show()
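The np.matrix(Y_train[i]) call in the training loop guarantees a 2-D (1, k) row even when a single label is a scalar or a 1-D array, which keeps the shapes inside backward() consistent. For example:

import numpy as np

print(np.matrix(0.0).shape)         # (1, 1)
print(np.matrix([1.0, 0.0]).shape)  # (1, 2)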
Example #56
0
file3 = path_data + filename3
file4 = path_data + filename4
file5 = path_data + filename5

#%% Compute stiffness parameters from ANSYS input values
Exx = 40.07e09
Eyy = 10.07e09
Ezz = 10.07e09
PRxy = 0.30
PRyz = 0.25875
PRxz = 0.30
Gxy = 4.0e09
Gyz = Eyy / (2 * (1 + PRyz))
Gxz = 4.0e09

S = np.matrix([[1 / Exx, -PRxy / Exx, 0], [-PRxy / Exx, 1 / Eyy, 0],
               [0, 0, 1 / Gxy]])

Q = inv(S)

Q11 = Q[0, 0] / 1e09
Q12 = Q[0, 1] / 1e09
Q22 = Q[1, 1] / 1e09
Q66 = Q[2, 2] / 1e09

print('----------------------------------------------------------------------')
print('Calculated stiffness parameters:')
print('Q11: ' + str(round(Q11, 2)) + ' GPa')
print('Q22: ' + str(round(Q22, 2)) + ' GPa')
print('Q12: ' + str(round(Q12, 2)) + ' GPa')
print('Q66: ' + str(round(Q66, 2)) + ' GPa')
print('----------------------------------------------------------------------')
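The inversion above can be cross-checked against the standard closed-form plane-stress relations for an orthotropic ply, using the minor Poisson's ratio PRyx = PRxy * Eyy / Exx; a sketch with the values defined in the snippet:

PRyx = PRxy * Eyy / Exx            # minor Poisson's ratio, ~0.0754
denom = 1 - PRxy * PRyx
print(Exx / denom / 1e09)          # Q11, ~41.0 GPa
print(Eyy / denom / 1e09)          # Q22, ~10.3 GPa
print(PRxy * Eyy / denom / 1e09)   # Q12, ~3.09 GPa
print(Gxy / 1e09)                  # Q66, 4.0 GPa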
Example #57
0
    def inference(self, kern, likelihood, mean_function=None, W=None, Y_metadata=None, precision=None):
        
        """
        Not sure about kronmagic yet
        """
        
        
        """
        Not sure about design of kern yet. 
        Needs to contain something like "is_toeplitz" per grid dimension
        and has to provide p different Kernel-matrices (p = #grid dimensions)
        
        
        
        
        ????? Do we even need partial grids for MSGP????? Probably not
        TO-DO: Incomplete grids:
                    The M-matrix is usually used to lift incomplete grid information
                    to a complete grid (y-values only for parts of the grid)
        """
        if not isinstance(likelihood,likelihoods.Gaussian):
            raise NotImplementedError("Not sure what todo if likelihood is non-gaussian")
        
        
        if kern.name != 'kern_grid':
            raise ValueError("kernel type has to be kern_grid")
            
        
        Z = kern.Z
        X = kern.X
        Y = kern.Y 
        W_x_z = kern.W_x_z

        kronmagic = False 
        
        if mean_function is None: 
            m = 0
        else:
            m = mean_function.f(X)
        
        if precision is None:
            precision = likelihood.gaussian_variance()
        

        p = len(Z) 
        n_grid = 1 
        n_data = np.shape(Y)[0]
        D=0
        for i in range(p):
            n_grid=n_grid*np.shape(Z[i])[0]
            D = D+np.shape(Z[i])[1]
        
        K  = kern.get_K_Z()

        is_toeplitz = kern.get_toeplitz_dims()
        #xe = np.dot(M,xe)
        #n=np.shape(xe)[1]
        for j in range(len(K)):
            if is_toeplitz[j]:
                K[j] = linalg.toeplitz(K[j])
                
        sn2 = np.exp(2*precision) 
        
        V = []
        E = []
    #with Timer() as t:
        for j in range(len(K)):
            if is_toeplitz[j]:
                V_j,E_j = msgp_linalg.eigr(linalg.toeplitz(K[j]))
            else:
                V_j,E_j = msgp_linalg.eigr(K[j])

            V.append(np.matrix(V_j))
            E.append(np.matrix(E_j))
    #print("Runtime eigendecomposition: {}".format(t.secs))
        
        e = np.ones((1,1))
        for ii in range(len(E)):
              e = linalg.kron(np.matrix(np.diag(E[ii])).T,e) 
        e = np.ravel(e.astype(float))
        if kronmagic:
            """
            Problem: here we would need the eigendecomposition of the kernel
            matrices. We would need to get those from the kernels.
            
            """
            raise NotImplementedError
            #s = 1/(e+sn2)
            #order = range(1,N)
        else:

            sort_index = np.argsort(e)

            sort_index = sort_index[::-1]
            sort_index = np.ravel(sort_index)
            
            eord = e[sort_index]

            """
            if n<N: ##We dont need this here <- only for partial grid structure
                eord = np.vstack((eord,np.zeros((n-N),1)))
                order = np.vstack((order,range(N+1,n).T))
            """

            s = 1./((float(n_data)/float(n_grid))*eord[0:n_data]+sn2) 

           
           
        if kronmagic:
        ## infGrid.m line 61
            raise NotImplementedError
        else:
            
            kron_mvm_func = lambda x: msgp_linalg.mvm_K(x,K,sn2,W_x_z)
            shape_mvm = (n_data,n_data) ## To-Do: what is the shape of the mvm?
            L = lambda x: -msgp_linalg.solveMVM(x,kron_mvm_func,shape_mvm,cgtol = 1e-6,cgmit=1000) # one minus sign too many?
        
 
        alpha = -L(Y-m) 

        lda = -np.sum(np.log(s))
        #lda_exact = sum(np.log(np.diag(linalg.cholesky(W_x_z.dot(W_x_z.dot(msgp_linalg.kronmvm(K,np.eye(n_grid))).T).T+sn2*np.eye(n_data)))));
        #lda = lda_exact
        #log_marginal = np.dot((Y-m).T,alpha)/2 + n_data*np.log(2*np.pi)/2 + lda/2 
        #print("model fit: {}".format(np.dot((Y-m).T,alpha)/2))
        #print("complexity: {}".format(lda/2))
        #print("complexity exact: {}".format(lda_exact))
        log_marginal = (np.dot((Y-m).T,alpha)/2 + n_data*np.log(2*np.pi)/2 + lda/2) *(-1)
        #print(log_marginal)
            
        #print(alpha)
        #print(W_x_z.T)
        #print(np.max(K[0]))
        #alpha_pred = msgp_linalg.kronmvm(K,W_x_z.T.dot(alpha)) # we need the term K_Z_Z*W.T*alpha for the predictive mean
        
        """
        Calculate  the nu-term for predictive variance
        
        TO-DO: not sure if the K_tilde kronmvm is correct
        """    

        
    
        grad_dict = dict()
        grad_dict["V"] = V 
        grad_dict["E"] = E
        grad_dict["S"] = dict()
        grad_dict["S"]["s"] = s
        grad_dict["S"]["eord"] = eord
        grad_dict["S"]["sort_index"] = sort_index
        post = dict()
        post["alpha"] = alpha
        
        
        return post, log_marginal, grad_dict
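The eigendecomposition bookkeeping above leans on the Kronecker identity that the eigenvalues of kron(K1, K2) are exactly the pairwise products of the factors' eigenvalues, which is why the code can assemble e by kron-ing the per-dimension diagonals. A small numeric sanity check, assuming only numpy:

import numpy as np

K1 = np.array([[2.0, 1.0], [1.0, 2.0]])
K2 = np.array([[3.0, 0.5], [0.5, 1.0]])
lhs = np.sort(np.linalg.eigvalsh(np.kron(K1, K2)))
rhs = np.sort(np.outer(np.linalg.eigvalsh(K1),
                       np.linalg.eigvalsh(K2)).ravel())
print(np.allclose(lhs, rhs))  # True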
Example #58
0
import numpy as np
import cv2
import platform
import sys
import time

# hsv tennis ball yellow
upper = np.array([70, 240, 255])
lower = np.array([20, 45, 130])

# hsv cochonnet red-orange ish
#upper = np.array([150, 105, 255])
#lower = np.array([0, 0, 0,])

state = np.matrix('0.0;0.0;0.0;0.0')  # x, y, xd, yd,

# P and Q matrices for EKF
P = np.matrix('10.0, 0.0, 0.0, 0.0; \
               0.0, 10.0, 0.0, 0.0; \
               0.0, 0.0, 10.0, 0.0; \
               0.0, 0.0, 0.0, 10.0')

Q = np.matrix('2.0, 0.0, 0.0, 0.0; \
               0.0, 2.0, 0.0, 0.0; \
               0.0, 0.0, 2.0, 0.0; \
               0.0, 0.0, 0.0, 2.0')

measurement = np.matrix('0;0')

debug_print = False
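The state and covariance definitions above use np.matrix's MATLAB-style string syntax: commas or spaces separate columns, semicolons separate rows, so '0.0;0.0;0.0;0.0' is a 4x1 column vector. For example:

import numpy as np

print(np.matrix('1, 2; 3, 4'))     # 2x2 matrix
print(np.matrix('0.0;0.0').shape)  # (2, 1) column vector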
Example #59
0
def loadData_Tokenizer(MAX_NB_WORDS, MAX_SEQUENCE_LENGTH):

    fname = os.path.join(path_WOS, "WebOfScience/WOS5736/X.txt")
    fnamek = os.path.join(path_WOS, "WebOfScience/WOS5736/YL1.txt")
    fnameL2 = os.path.join(path_WOS, "WebOfScience/WOS5736/YL2.txt")

    with open(fname) as f:
        content = f.readlines()
        content = [clean_str(x) for x in content]
    content = np.array(content)
    with open(fnamek) as fk:
        contentk = fk.readlines()
    contentk = [x.strip() for x in contentk]
    with open(fnameL2) as fk:
        contentL2 = fk.readlines()
        contentL2 = [x.strip() for x in contentL2]
    Label = np.matrix(contentk, dtype=int)
    Label = np.transpose(Label)
    number_of_classes_L1 = np.max(Label) + 1  #number of classes in Level 1

    Label_L2 = np.matrix(contentL2, dtype=int)
    Label_L2 = np.transpose(Label_L2)
    np.random.seed(7)

    Label = np.column_stack((Label, Label_L2))

    number_of_classes_L2 = np.zeros(
        number_of_classes_L1, dtype=int
    )  #number of classes in Level 2 that is 1D array with size of (number of classes in level one,1)

    tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(content)
    sequences = tokenizer.texts_to_sequences(content)
    word_index = tokenizer.word_index

    print('Found %s unique tokens.' % len(word_index))

    content = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)

    indices = np.arange(content.shape[0])
    np.random.shuffle(indices)
    content = content[indices]
    Label = Label[indices]
    print(content.shape)

    X_train, X_test, y_train, y_test = train_test_split(content,
                                                        Label,
                                                        test_size=0.2,
                                                        random_state=0)

    L2_Train = []
    L2_Test = []
    content_L2_Train = []
    content_L2_Test = []
    '''
    Create #L1 sets of train and test samples for level two of the Hierarchical Deep Learning models
    '''
    for i in range(0, number_of_classes_L1):
        L2_Train.append([])
        L2_Test.append([])
        content_L2_Train.append([])
        content_L2_Test.append([])

        X_train = np.array(X_train)
        X_test = np.array(X_test)
    for i in range(0, X_train.shape[0]):
        L2_Train[y_train[i, 0]].append(y_train[i, 1])
        number_of_classes_L2[y_train[i, 0]] = max(
            number_of_classes_L2[y_train[i, 0]], (y_train[i, 1] + 1))
        content_L2_Train[y_train[i, 0]].append(X_train[i])

    for i in range(0, X_test.shape[0]):
        L2_Test[y_test[i, 0]].append(y_test[i, 1])
        content_L2_Test[y_test[i, 0]].append(X_test[i])

    for i in range(0, number_of_classes_L1):
        L2_Train[i] = np.array(L2_Train[i])
        L2_Test[i] = np.array(L2_Test[i])
        content_L2_Train[i] = np.array(content_L2_Train[i])
        content_L2_Test[i] = np.array(content_L2_Test[i])

    embeddings_index = {}
    '''
    For CNN and RNN, we use the 100-dimensional GloVe text vector-space model. A vector-space model is a mathematical mapping of the word space.
    '''
    Glove_path = os.path.join(GLOVE_DIR, 'glove.6B.100d.txt')
    print(Glove_path)
    f = open(Glove_path, encoding="utf8")
    for line in f:
        values = line.split()
        word = values[0]
        try:
            coefs = np.asarray(values[1:], dtype='float32')
        except ValueError:
            print("Warning " + str(values) + " in " + str(line))
        embeddings_index[word] = coefs
    f.close()
    print('Total %s word vectors.' % len(embeddings_index))
    return (X_train, y_train, X_test, y_test, content_L2_Train, L2_Train,
            content_L2_Test, L2_Test, number_of_classes_L2, word_index,
            embeddings_index, number_of_classes_L1)
Example #60
0
for k in range(m * n):
    row = m * n * [0]
    # Horizontal gridlines
    if ((k + 1) % n) != 0:
        row[k + 1] = 1
    if (k % n) != 0:
        row[k - 1] = 1
    # Vertical gridlines
    if (k + n) < (m * n):
        row[k + n] = 1
    if (k - n) >= 0:
        row[k - n] = 1
    # \ diagonals
    if (k % n) != (n - 1) and k < ((m - 1) * n):
        row[k + n + 1] = 1
    if (k % n) != 0 and k >= n:
        row[k - n - 1] = 1
    # / diagonals
    if (k % n) != (n - 1) and k >= n:
        row[k - n + 1] = 1
    if (k % n) != 0 and k < (m - 1) * n:
        row[k + n - 1] = 1
    graph.append(row)
numPaths = 0
A = matrix(graph)
for i in range(1, m * n):
    power = A**i
    print("Paths of length " + str(i) + ": " + str(power.sum()))
    numPaths += power.sum()
print("Total paths: " + str(numPaths))