Example #1
def _test_matrix():
    #  A = numpy.mat([[1,2,3],[4,5,6]],numpy.float32)
    #  B = numpy.mat([[2,3],[4,5],[6,7]],numpy.float32)
    #  A = numpy.mat( numpy.array( numpy.random.random_sample((256,8192)), numpy.float32) )
    #  B = numpy.mat( numpy.array( numpy.random.random_sample((8192,256)), numpy.float32) )
    #  A = numpy.mat( numpy.array( numpy.random.random_sample((257,8191)), numpy.float32) )
    #  B = numpy.mat( numpy.array( numpy.random.random_sample((8191,257)), numpy.float32) )
    A = numpy.mat(numpy.array(numpy.random.random_sample((256, 65536)), numpy.float32))
    B = numpy.mat(numpy.array(numpy.random.random_sample((65536, 256)), numpy.float32))
    #  A = numpy.mat( numpy.array( numpy.random.random_sample((200,3000)), numpy.float32) )
    #  B = numpy.mat( numpy.array( numpy.random.random_sample((3000,3000,)), numpy.float32) )
    #  A = numpy.mat( numpy.array( numpy.random.random_sample((2048,2048)), numpy.float32) )
    #  B = numpy.mat( numpy.array( numpy.random.random_sample((2048,2048)), numpy.float32) )
    #  i = 3000
    #  A = numpy.mat( numpy.array( numpy.random.random_sample((i,i)), numpy.float32) )
    #  B = numpy.mat( numpy.array( numpy.random.random_sample((i,i)), numpy.float32) )
    #  A = numpy.mat([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]],numpy.float32)
    #  B = numpy.mat([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]],numpy.float32)
    #  print A
    #  print A*B
    print "[pycublas] shapes: ", A.shape, "*", B.shape, "=", (A.shape[0], B.shape[1])
    start = time.time()
    C1 = (CUBLASMatrix(A) * CUBLASMatrix(B)).np_matrix()
    t1 = time.time() - start
    print "[pycublas] time (CUBLAS): %fs" % t1
    start = time.time()
    C2 = A * B
    t2 = time.time() - start
    print "[pycublas] time (numpy): %fs" % t2
    print "[pycublas] speedup: %.1fX" % (t2 / t1)
    print "[pycublas] error (average per cell):", numpy.abs(C1 - C2).sum() / C2.size
Example #2
    def test_rotate_inertia(self):
        """Are we obtaining the global inertia properly?"""

        # Create parameters.
        label = "seg1"
        pos = np.array([[1], [2], [3]])
        rot = inertia.rotate_space_123([pi / 2, pi / 2, pi / 2])
        solids = [self.solidAB, self.solidBC, self.solidCD]
        color = (1, 0, 0)

        # Create the segment.
        seg1 = seg.Segment(label, pos, rot, solids, color)

        # This inertia matrix describes two 1kg point masses at (0, 2, 1) and
        # (0, -2, -1) in the global reference frame, A.
        seg1._rel_inertia = mat([[10.0, 0.0, 0.0], [0.0, 2.0, -4.0], [0.0, -4.0, 8.0]])

        # If we want the inertia about a new reference frame, B, such that the
        # two masses lie on the yb axis we can rotate about xa through the angle
        # arctan(1/2). Note that this function returns R from va = R * vb.
        seg1._rot_mat = inertia.rotate_space_123((arctan(1.0 / 2.0), 0.0, 0.0))

        seg1.calc_properties()

        I_b = seg1.inertia

        expected_I_b = mat([[10.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 10.0]])

        testing.assert_allclose(I_b, expected_I_b)
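A quick independent check of the rotation reasoning in the comments above (a minimal sketch of mine, assuming rotate_space_123((theta, 0, 0)) is the usual rotation about x; with va = R * vb the inertia transforms as I_b = R.T * I_a * R):

import numpy as np

theta = np.arctan(1.0 / 2.0)
R = np.mat([[1, 0, 0],
            [0, np.cos(theta), -np.sin(theta)],
            [0, np.sin(theta), np.cos(theta)]])
I_a = np.mat([[10.0, 0.0, 0.0], [0.0, 2.0, -4.0], [0.0, -4.0, 8.0]])
print(R.T * I_a * R)  # approximately diag(10, 0, 10), matching expected_I_b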
Example #3
def test_main():
    from numpy import mat

    ## Test FITELLIPSE - run through all possibilities
    # Example
    ## 1) Linear fit, bookstein constraint
    # Data points
    x = mat("1 2 5 7 9 6 3 8; 7 6 8 7 5 7 2 4")

    z, a, b, alpha = fitellipse(x, "linear")

    ## 2) Linear fit, Trace constraint
    # Data points
    x = mat("1 2 5 7 9 6 3 8; 7 6 8 7 5 7 2 4")

    z, a, b, alpha = fitellipse(x, "linear", constraint="trace")

    ## 3) Nonlinear fit
    # Data points
    x = mat("1 2 5 7 9 6 3 8; 7 6 8 7 5 7 2 4")

    z, a, b, alpha = fitellipse(x)

    # Changing the tolerance, maxits
    z, a, b, alpha = fitellipse(x, tol=1e-8, maxits=100)

    """
Example #4
File: linalg.py Project: bjzu/MF
def all(X, axis = None):
    """
    Test whether all elements along a given axis of sparse or dense matrix :param:`X` are nonzero.
    
    :param X: Target matrix.
    :type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia or :class:`numpy.matrix`
    :param axis: Specified axis along which the nonzero test is performed. If :param:`axis` is not specified, the whole matrix is considered.
    :type axis: `int`
    """
    if sp.isspmatrix(X):
        X = X.tocsr()
        assert axis == 0 or axis == 1 or axis == None, "Incorrect axis number."
        if axis is None:
            return len(X.data) == X.shape[0] * X.shape[1]
        res = [0 for _ in xrange(X.shape[1 - axis])] 
        def _caxis(now, row, col):
            res[col] += 1
        def _raxis(now, row, col):
            res[row] += 1
        check = _caxis if axis == 0 else _raxis
        now = 0
        for row in range(X.shape[0]):
            upto = X.indptr[row+1]
            while now < upto:
                col = X.indices[now]
                check(now, row, col)
                now += 1
        sol = [x == X.shape[0] if axis == 0 else x == X.shape[1] for x in res]
        return np.mat(sol) if axis == 0 else np.mat(sol).T
    else:
        return X.all(axis)
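A minimal usage sketch (mine, assuming the all function above and its module's np/sp imports are available): a column-wise nonzero test on a small CSR matrix.

import numpy as np
import scipy.sparse as sp

X = sp.csr_matrix(np.array([[1, 0, 3], [4, 5, 6]]))
print(all(X, axis=0))  # [[ True False  True]]: only columns 0 and 2 are fully nonzero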
Example #5
 def train(self, inp, out, training_weight=1.):
     inp = np.mat(inp).T
     out = np.mat(out).T
     deriv = []
     val = inp
     vals = [val]
     # forward calculation of activations and derivatives
     for weight,bias in self.__weights:
         val = weight*val
         val += bias
         deriv.append(self.__derivative(val))
         vals.append(self.__activation(val))
     deriv = iter(reversed(deriv))
     weights = iter(reversed(self.__weights))
     errs = []
     errs.append(np.multiply(vals[-1]-out, next(deriv)))
     # backwards propagation of errors
     for (w,b),d in zip(weights, deriv):
         errs.append(np.multiply(np.dot(w.T, errs[-1]), d))
     weights = iter(self.__weights)
     for (w,b),v,e in zip(\
             self.__weights,\
             vals, reversed(errs)):
         e *= self.__learning_rate*training_weight
         w -= e*v.T
         b -= e
     tmp = vals[-1]-out
     return np.dot(tmp[0].T,tmp[0])*.5*training_weight
Example #6
def testDigits(kTup=('rbf', 10)):
    data, labels = loadImages('trainingDigits')
    b, alphas = smo(data, labels, 200, 0.0001, 10000, kTup)
    dataMat = np.mat(data)
    labelMat = np.mat(labels).transpose()
    svInd = np.nonzero(alphas.A > 0)[0]
    sVs = dataMat[svInd]
    labelSV = labelMat[svInd]
    print "There are %d Support Vectors" % np.shape(sVs)[0]
    m, n = np.shape(dataMat)
    errorCount = 0
    for i in xrange(m):
        kernelEval = kernelTransform(sVs, dataMat[i, :], kTup)
        predict = kernelEval.T * np.multiply(labelSV, alphas[svInd]) + b
        if np.sign(predict) != np.sign(labels[i]):
            errorCount += 1
    print "The training error rate is %f " % (float(errorCount) / m)
    data, labels = loadImages('testDigits')
    dataMat = np.mat(data)
    labelMat = np.mat(labels).transpose()
    m, n = np.shape(dataMat)
    errorCount = 0
    for i in xrange(m):
        kernelEval = kernelTransform(sVs, dataMat[i, :], kTup)
        predict = kernelEval.T * np.multiply(labelSV, alphas[svInd]) + b
        if np.sign(predict) != np.sign(labels[i]):
            errorCount += 1
    print "The test error rate is %f " % (float(errorCount) / m)
Example #7
    def _get_rotate_and_skew_transform(x1, y1, x2, y2, x3, y3):
        """
        Return a transform that does
         (x1, y1) -> (x1, y1)
         (x2, y2) -> (x2, y2)
         (x2, y1) -> (x3, y3)

        It is intended to derive a skew transform that preserves the
        lower-left corner (x1, y1) and the top-right corner (x2, y2), but
        moves the lower-right corner (x2, y1) to a new position
        (x3, y3).
        """
        tr1 = mtransforms.Affine2D()
        tr1.translate(-x1, -y1)
        x2a, y2a = tr1.transform_point((x2, y2))
        x3a, y3a = tr1.transform_point((x3, y3))

        inv_mat = 1./(x2a*y3a-y2a*x3a) * np.mat([[y3a, -y2a],[-x3a, x2a]])

        a, b = (inv_mat * np.mat([[x2a], [x2a]])).flat
        c, d = (inv_mat * np.mat([[y2a], [0]])).flat

        tr2 = mtransforms.Affine2D.from_values(a, c, b, d, 0, 0)

        tr = (tr1 + tr2 + mtransforms.Affine2D().translate(x1, y1)).inverted().get_affine()

        return tr
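A quick check (mine, assuming the method above is reachable as a plain function) that the returned transform pins the three anchor points exactly as the docstring describes:

tr = _get_rotate_and_skew_transform(0., 0., 2., 2., 2.5, 0.5)
for pt in [(0., 0.), (2., 2.), (2., 0.)]:
    print(pt, '->', tr.transform_point(pt))
# expected: (0, 0) -> (0, 0), (2, 2) -> (2, 2), (2, 0) -> (2.5, 0.5)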
Example #8
 def __init__(self, X, regparam=1.0, number_of_clusters=2, kernel='LinearKernel', basis_vectors=None, Y = None, fixed_indices=None, callback=None,  **kwargs):
     kwargs['X'] = X 
     kwargs['kernel'] = kernel
     if basis_vectors is not None:
         kwargs['basis_vectors'] = basis_vectors
     self.svdad = adapter.createSVDAdapter(**kwargs)
     self.svals = np.mat(self.svdad.svals)
     self.svecs = np.mat(self.svdad.rsvecs)
     self.callbackfun = callback
     self.regparam = regparam
     self.constraint = 0
     self.labelcount = number_of_clusters
     self.size = X.shape[0] 
     #if self.labelcount == 2:
     #    self.oneclass = True
     #else:
     #    self.oneclass = False
     
     if Y is None:
         self.classvec = np.zeros(self.size, np.int)
     else:
         self.classvec = Y
     #self.size = self.classvec.shape[0]
     self.Y = -np.ones((self.size, self.labelcount))
     self.classcounts = np.zeros((self.labelcount), dtype = np.int32)
     for i in range(self.size):
         clazzind = self.classvec[i]
         self.Y[i, clazzind] = 1
         self.classcounts[clazzind] = self.classcounts[clazzind] + 1
     
     self.fixedindices = []
     if fixed_indices is not None:
         self.fixedindices = fixed_indices
     self.train()
Example #9
 def Y(self):
     """Return Z-parameter matrix"""
     S = np.mat(self.S)
     E = np.mat(np.eye(self.n, self.n))
     Zref = self.z0 * E
     Gref = 1 / np.sqrt(np.real(self.z0)) * E
     return np.array(Gref**-1 * Zref**-1 * (S + E)**-1 * (E - S) * Gref)
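A small sanity check (mine): for a matched 1-port (S = 0) the expression above should reduce to Y = 1/z0.

import numpy as np

z0 = 50.0
S = np.mat(np.zeros((1, 1)))
E = np.mat(np.eye(1, 1))
Zref = z0 * E
Gref = 1 / np.sqrt(np.real(z0)) * E
print(np.array(Gref**-1 * Zref**-1 * (S + E)**-1 * (E - S) * Gref))  # [[0.02]], i.e. 1/50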
Example #10
    def CA(self):
#        return NPortZ(self).CA
        z0 = self.z0
        A = np.mat(self.A)
        T = np.matrix([[np.sqrt(z0), -(A[0,1]+A[0,0]*z0)/np.sqrt(z0)],
                        [-1/np.sqrt(z0), -(A[1,1]+A[1,0]*z0)/np.sqrt(z0)]])
        return np.array(T * np.mat(self.CS) * T.H)
Example #11
 def __mul__(self, a):
     """Cascade of two n-ports"""
     selfA = np.mat(self.A)
     aA = np.mat(a.A)
     anport = NPortA(selfA * aA,
                     selfA * np.mat(a.CA) * selfA.H + self.CA )
     return self.__class__(anport)
Example #12
 def Z(self):
     """Return Z-parameter matrix"""
     S = np.mat(self.S).astype(float)
     E = np.mat(np.eye(self.n, self.n))
     Zref = self.z0 * E
     Gref = 1 / np.sqrt(np.real(self.z0)) * E
     return np.array(Gref.I * (E - S).I * (S + E) * Zref * Gref)
Example #13
def smoP(dataMatIn, classLabels, C, toler, maxIter):
	"""
	完整的线性SMO算法
	Parameters:
		dataMatIn - 数据矩阵
		classLabels - 数据标签
		C - 松弛变量
		toler - 容错率
		maxIter - 最大迭代次数
	Returns:
		oS.b - SMO算法计算的b
		oS.alphas - SMO算法计算的alphas
	"""
	oS = optStruct(np.mat(dataMatIn), np.mat(classLabels).transpose(), C, toler)					#初始化数据结构
	iter = 0 																						#初始化当前迭代次数
	entireSet = True; alphaPairsChanged = 0
	while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):							#遍历整个数据集都alpha也没有更新或者超过最大迭代次数,则退出循环
		alphaPairsChanged = 0
		if entireSet:																				#遍历整个数据集   						
			for i in range(oS.m):        
				alphaPairsChanged += innerL(i,oS)													#使用优化的SMO算法
				print("全样本遍历:第%d次迭代 样本:%d, alpha优化次数:%d" % (iter,i,alphaPairsChanged))
			iter += 1
		else: 																						#遍历非边界值
			nonBoundIs = np.nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]						#遍历不在边界0和C的alpha
			for i in nonBoundIs:
				alphaPairsChanged += innerL(i,oS)
				print("非边界遍历:第%d次迭代 样本:%d, alpha优化次数:%d" % (iter,i,alphaPairsChanged))
			iter += 1
		if entireSet:																				#遍历一次后改为非边界遍历
			entireSet = False
		elif (alphaPairsChanged == 0):																#如果alpha没有更新,计算全样本遍历 
			entireSet = True  
		print("迭代次数: %d" % iter)
	return oS.b,oS.alphas 																			#返回SMO算法计算的b和alphas
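A hypothetical driver (mine; loadDataSet, optStruct and innerL are assumed to come from the same project and are not defined here):

dataArr, labelArr = loadDataSet('testSet.txt')   # hypothetical data loader
b, alphas = smoP(dataArr, labelArr, C=0.6, toler=0.001, maxIter=40)
print(b, alphas[alphas > 0])                     # bias and the support-vector alphas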
Example #14
def cv_show( yEv, yEv_calc, disp = True, graph = True, grid_std = None):

	# If the output is a vector and the original is a matrix,
	# the output is converted to a matrix.
	if len( np.shape(yEv_calc)) == 1:	
		yEv_calc = np.mat( yEv_calc).T
	if len( np.shape(yEv)) == 1:
		yEv = np.mat( yEv).T

	r_sqr, RMSE = jchem.estimate_accuracy( yEv, yEv_calc, disp = disp)
	if graph:
		#plt.scatter( yEv.tolist(), yEv_calc.tolist())	
		plt.figure()	
		ms_sz = max(min( 4000 / yEv.shape[0], 8), 1)
		plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz) # Change ms 
		ax = plt.gca()
		lims = [
			np.min([ax.get_xlim(), ax.get_ylim()]),  # min of both axes
			np.max([ax.get_xlim(), ax.get_ylim()]),  # max of both axes
		]
		# now plot both limits against each other
		#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
		ax.plot(lims, lims, '-', color = 'pink')
		plt.xlabel('Experiment')
		plt.ylabel('Prediction')
		if grid_std:
			plt.title( '($r^2$, std) = ({0:.2e}, {1:.2e}), RMSE = {2:.2e}'.format( r_sqr, grid_std, RMSE))
		else:
			plt.title( '$r^2$ = {0:.2e}, RMSE = {1:.2e}'.format( r_sqr, RMSE))
		plt.show()
	return r_sqr, RMSE
Example #15
    def _determine_karray(equivalent_sizes, appended_sizes,
                          max_equivalent_size,
                          total_appended_size):

        n = len(equivalent_sizes)
        import numpy as np
        A = np.mat(np.zeros((n+1, n+1), dtype="d"))
        B = np.zeros((n+1), dtype="d")
        # AxK = B

        # populate A
        for i, (r, a) in enumerate(equivalent_sizes):
            A[i, i] = r
            A[i, -1] = -1
            B[i] = -a
        A[-1, :-1] = [r for r, a in appended_sizes]
        B[-1] = total_appended_size - sum([a for rs, a in appended_sizes])

        karray_H = (A.I*np.mat(B).T).A1
        karray = karray_H[:-1]
        H = karray_H[-1]

        if H > max_equivalent_size:
            karray = ((max_equivalent_size -
                      np.array([a for r, a in equivalent_sizes]))
                      / np.array([r for r, a in equivalent_sizes]))
        return karray
Example #16
    def releaseK(self,Kl,rel):
        """Return a modified stiffness matrix to account for a moment release
        at one of the ends.  Kl is the original matrix, dx, dy are projections of the
        member, and 'rel' is 2 or 5 to identify the local dof # of the released dof.
        Both KL and KG are returned if the transformation matrix, T, is provided"""
        L = self.L
        if rel == 2:
            if Kl[5,5] == 0.:   # is other end also pinned?
                em = np.mat([1.,0.]).T    # corrective end moments, far end pinned
            else:
                em = np.mat([1.,0.5]).T   # corrective end moments, far end fixed
        elif rel == 5:
            if Kl[2,2] == 0.:
                em = np.mat([0.,1.]).T
            else:
                em = np.mat([0.5,1.]).T
        else:
            raise ValueError("Invalid release #: {}".format(rel))
        Tf = np.mat([[0.,0.],[1./L,1./L],[1.,0.],[0.,0.],[-1./L,-1./L],[0.,1.]])
        M = Tf*em

        K = Kl.copy()    
        K[:,1] -= M*K[rel,1]  # col 1 - forces for unit vertical displacement at j-end
        K[:,2] -= M*K[rel,2]  # col 2 - forces for unit rotation at j-end
        K[:,4] -= M*K[rel,4]  # col 4 - forces for unit vertical displacement at k-end
        K[:,5] -= M*K[rel,5]  # col 5 - forces for unit rotation at k-end
        return K
Example #17
    def test_lsim_double_integrator(self):
        # Note: scipy.signal.lsim fails if A is not invertible
        A = np.mat("0. 1.;0. 0.")
        B = np.mat("0.; 1.")
        C = np.mat("1. 0.")
        D = 0.
        sys = StateSpace(A, B, C, D)

        def check(u, x0, xtrue):
            _t, yout, xout = forced_response(sys, t, u, x0)
            np.testing.assert_array_almost_equal(xout, xtrue, decimal=6)
            ytrue = np.squeeze(np.asarray(C.dot(xtrue)))
            np.testing.assert_array_almost_equal(yout, ytrue, decimal=6)

        # test with zero input
        npts = 10
        t = np.linspace(0, 1, npts)
        u = np.zeros_like(t)
        x0 = np.array([2., 3.])
        xtrue = np.zeros((2, npts))
        xtrue[0, :] = x0[0] + t * x0[1]
        xtrue[1, :] = x0[1]
        check(u, x0, xtrue)

        # test with step input
        u = np.ones_like(t)
        xtrue = np.array([0.5 * t**2, t])
        x0 = np.array([0., 0.])
        check(u, x0, xtrue)

        # test with linear input
        u = t
        xtrue = np.array([1./6. * t**3, 0.5 * t**2])
        check(u, x0, xtrue)
Example #18
    def setUp(self):
        self.a = np.array([[1,2,3],[4,0,5]])
        self.space_s = Space(SparseMatrix(np.mat(self.a)),
                                         ["a", "b"], ["f1","f2", "f3"])

        self.space_d = Space(DenseMatrix(np.mat(self.a)),
                                         ["a", "b"], ["f1","f2", "f3"])
Example #19
 def test_train2(self):
     dim_ = 2
     dim_1 = 3
     dim_2 = 5        
     for dim in [dim_1 + dim_2, dim_1 + dim_2 + 2]:
         expected_a = np.mat(np.random.random((dim_,dim_1)))
         expected_b = np.mat(np.random.random((dim_,dim_2)))     
         m1 = np.mat(np.random.random((dim,dim_1)))     
         m2 = np.mat(np.random.random((dim,dim_2)))
        
         ph = np.mat(expected_a*m1.T + expected_b*m2.T)
        
         comp_model = FullAdditive(learner=LstsqRegressionLearner(intercept=False))
         comp_model._train(DenseMatrix(m1),DenseMatrix(m2),
                                    DenseMatrix(ph).transpose())
         np.testing.assert_array_almost_equal(comp_model._mat_a_t.transpose().mat,
                                              expected_a, 10)
         np.testing.assert_array_almost_equal(comp_model._mat_b_t.transpose().mat,
                                              expected_b, 10)
         
     for dim in [dim_1 + dim_2 + 6, dim_1 + dim_2 + 20]:
         expected_a = np.mat(np.random.random((dim_,dim_1)))
         expected_b = np.mat(np.random.random((dim_,dim_2)))     
         m1 = np.mat(np.random.random((dim,dim_1)))     
         m2 = np.mat(np.random.random((dim,dim_2)))
        
         ph = np.mat(expected_a*m1.T + expected_b*m2.T)
                    
         comp_model = FullAdditive(learner=LstsqRegressionLearner(intercept=True))
         comp_model._train(DenseMatrix(m1),DenseMatrix(m2),
                                    DenseMatrix(ph).transpose())
         np.testing.assert_array_almost_equal(comp_model._mat_a_t.transpose().mat,
                                              expected_a, 10)
         np.testing.assert_array_almost_equal(comp_model._mat_b_t[:-1,:].transpose().mat,
                                              expected_b, 10)
Example #20
    def test_top_feat_selection(self):
        test_cases = [(self.a, np.mat([[3,1],[5,4]]), [2,0], 2),
                      (self.a, np.mat([[3],[5]]), [2], 1),
                      (self.a, np.mat([[3,1,2],[5,4,0]]), [2,0,1], 6),
                      ]

        for in_mat, expected_mat, expected_perm, no_cols in test_cases:
            fs = TopFeatureSelection(no_cols)

            out_mat, perm = fs.apply(DenseMatrix(in_mat))
            np.testing.assert_array_equal(out_mat.mat, expected_mat)
            self.assertListEqual(perm, expected_perm)

            out_mat, perm = fs.apply(SparseMatrix(in_mat))
            np.testing.assert_array_equal(out_mat.mat.todense(), expected_mat)
            self.assertListEqual(perm, expected_perm)

            fs = TopFeatureSelection(no_cols, criterion="length")

            out_mat, perm = fs.apply(DenseMatrix(in_mat))
            np.testing.assert_array_equal(out_mat.mat, expected_mat)
            self.assertListEqual(perm, expected_perm)

            out_mat, perm = fs.apply(SparseMatrix(in_mat))
            np.testing.assert_array_equal(out_mat.mat.todense(), expected_mat)
            self.assertListEqual(perm, expected_perm)

        self.assertRaises(ValueError, TopFeatureSelection, 0)
        self.assertRaises(ValueError, TopFeatureSelection, 2, criterion="something")
Example #21
def main():
    image0 = cv.LoadImage('pic1.jpg')
    image1 = cv.LoadImage('pic2.jpg')
    buff = cv.LoadImage('buff.jpg')
    buff2 = cv.LoadImage('buff.jpg')
    #image[ y, x , rgb ]
    features0 = numpy.mat([[1771,1111],[2073.5,1056],[1963.5,1259.5],[1732.5,1435.5],[2095.5,1347.5],
                    [1908.5,1468.5],[1941.5,1666.5],[1210,1705],[2156,1551],[1534.5,2040.5],
                    [1952.5,1941.5],[1837,418],[1930.5,1100],[1611.5,1133],[2194.5,1039.5],
                    [1848,797.5],[2101,775.5],[1545.5,1408],[2167,1303.5]])
    features1 = numpy.mat([[1738,1111],[2117.5,1094.5],[1936,1309],[1710.5,1457.5],[2161.5,1430],
                    [1919.5,1512.5],[1925,1732.5],[1342,1633.5],[2420,1650],[1644.5,2029.5],
                    [2128.5,2035],[1941.5,374],[1936,1122],[1578.5,1111],[2288,1089],[1798.5,786.5],
                    [2095.5,803],[1540,1391.5],[2293.5,1364]])
    fund = fundamental(features0, features1)
    H1, H2 = H1H2Calc(fund)
    prewarp1, prewarp2 = WarpImages(image0, image1, H1, H2, buff, buff2)
    features0, features1 = formatForFund(features0, features1)
    warpFeatures0 = WarpFeatures(features0.T, H1)
    warpFeatures1 = WarpFeatures(features1.T, H2)
    #Transition(prewarp1, prewarp2, warpFeatures0, warpFeatures1, features0, features1)
    cv.NamedWindow('display')
##    cv.NamedWindow('prewarp1')
##    cv.NamedWindow('prewarp2')
##    cv.ShowImage('prewarp1', prewarp1)
##    cv.WaitKey(0)
##    cv.ShowImage('prewarp2', prewarp2)
##    cv.WaitKey(0)
    Linear(prewarp1, prewarp2, H1, H2, buff)#, writer)
Example #22
def ball_filter6(dt,R=1., Q = 0.1):
    f1 = KalmanFilter(dim=6)
    g = 10
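    # (Reader's note, not from the source: the state appears to be
    # [x, vx, ax, y, vy, 1]; the constant last component feeds the
    # -0.5*g*dt**2 and -g*dt gravity terms into y and vy below.)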

    f1.F = np.mat ([[1., dt, dt**2,  0,       0,  0],
                    [0,  1., dt,     0,       0,  0],
                    [0,  0,  1.,     0,       0,  0],
                    [0,  0,  0.,    1., dt, -0.5*dt*dt*g],
                    [0,  0,  0,      0, 1.,      -g*dt],
                    [0,  0,  0,      0, 0.,      1.]])

    f1.H = np.mat([[1,0,0,0,0,0],
                   [0,0,0,0,0,0],
                   [0,0,0,0,0,0],
                   [0,0,0,1,0,0],
                   [0,0,0,0,0,0],
                   [0,0,0,0,0,0]])


    f1.R = np.mat(np.eye(6)) * R

    f1.Q = np.zeros((6,6))
    f1.Q[2,2] = Q
    f1.Q[5,5] = Q
    f1.x = np.mat([0, 0 , 0, 0, 0, 0]).T
    f1.P = np.eye(6) * 50.
    f1.B = 0.
    f1.u = 0

    return f1
Example #23
 def buildStump(self,dataArr,classLabels,D):  # D is a vector of the data's weight
     dataMatrix = np.mat(dataArr)
     labelMat = np.mat(classLabels).T
     m,n = np.shape(dataMatrix)
     numSteps = 10.0
     bestStump = {}
     bestClassEst = np.mat(np.zeros((m,1)))
     minError = np.inf
     for i in range(n):
         rangeMin = dataMatrix[:,i].min()
         rangeMax = dataMatrix[:,i].max()
         stepSize = (rangeMax - rangeMin)/numSteps
         for j in range(-1,int(numSteps) + 1):
             for inequal in ['lt','gt']:
                 thresholdVal = (rangeMin + float(j) * stepSize)
                 predictedVals = self.stumpDecisionTree(dataMatrix,i,thresholdVal,inequal)
                 # print("Predict value:" , predictedVals.T)
                 errArr = np.mat(np.ones((m,1)))
                 errArr[predictedVals == labelMat] = 0   # set 0 to the vector which is classified correctly
                 # print(predictedVals.T," ",labelMat.T)
                 weightedError = D.T * errArr
                 # print("split: dim %d, threshold value %.2f ,threshold inequal: %s, the weighted error is %.3f" %(i,thresholdVal,inequal,weightedError))
                 if weightedError < minError:
                     minError = weightedError
                     bestClassEst = predictedVals.copy()
                     bestStump['dimension'] = i
                     bestStump['inequal'] = inequal
                     bestStump['threshold'] = thresholdVal
     return bestStump,minError,bestClassEst
Example #24
def run(V = None, V1 = None):
    """
    Run examples.
    
    :param V: Target matrix to estimate.
    :type V: :class:`numpy.matrix`
    :param V1: (Second) Target matrix to estimate, used in multiple NMF (e.g. SNMNMF).
    :type V1: :class:`numpy.matrix`
    """
    if V is None or V1 is None:
        prng = np.random.RandomState(42)
        # construct target matrix 
        V = abs(np.mat(prng.normal(loc = 0.0, scale = 1.0, size = (20, 30))))
        V1 = abs(np.mat(prng.normal(loc = 0.0, scale = 1.0, size = (20, 25))))
    run_snmnmf(V, V1)
    run_bd(V)
    run_bmf(V)
    run_icm(V)
    run_lfnmf(V)
    run_lsnmf(V)
    run_nmf(V)
    run_nsnmf(V)
    run_pmf(V)
    run_psmf(V)
    run_snmf(V)
Example #25
def r_t_mat_anallo(an1, an2):
    """ Returns R12, T12, R21, T21 at an interface between thin films.

        R12 is the reflection matrix from Anallo 1 off Anallo 2

        The sign of elements in T12 and T21 is fixed to be positive,
        in the eyes of `numpy.sign`
    """
    if len(an1.k_z) != len(an2.k_z):
        raise ValueError, "Need the same number of plane waves in \
        Anallos %(an1)s and %(an2)s" % {'an1' : an1, 'an2' : an2}

    Z1 = an1.Z()
    Z2 = an2.Z()

    R12 = np.mat(np.diag((Z2 - Z1)/(Z2 + Z1)))
    # N.B. there is potentially a branch choice problem here, stemming
    # from the normalisation to unit flux.
    # We normalise each field amplitude by
    # $chi^{\pm 1/2} = sqrt(k_z/k)^{\pm 1} = sqrt(Z/Zc)^{\pm 1}$
    # The choice of branch in those square roots must be the same as the
    # choice in the related square roots that we are about to take:
    T12 = np.mat(np.diag(2.*sqrt(Z2)*sqrt(Z1)/(Z2+Z1)))
    R21 = -R12
    T21 = T12

    return R12, T12, R21, T21
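A quick numeric check (mine) of the scalar identity behind these diagonal matrices: with R21 = -R12 and T21 = T12 as above, the Stokes relation T12*T21 - R12*R21 = 1 holds for real impedances.

import numpy as np

Z1, Z2 = 1.0, 2.5
R12 = (Z2 - Z1) / (Z2 + Z1)
T12 = 2. * np.sqrt(Z2) * np.sqrt(Z1) / (Z2 + Z1)
R21, T21 = -R12, T12
print(np.isclose(T12 * T21 - R12 * R21, 1.0))  # True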
Example #26
def test_rotate_inertia():
    """Are we obtaining the global inertia properly?"""

    density = 1.5
    height = 4
    height_vec = array([[0], [0], [height]])

    # One thickness is 0.
    r0 = 5; t0 = 0; r1 = 2; t1 = 2;

    stad1 = Stadium('Ls1: umbilicus', 'thicknessradius', t0, r0)
    stad2 = Stadium('Lb1: mid-arm', 'thicknessradius', t1, r1)

    solidA = StadiumSolid('solid', density, stad1, stad2, height)

    # This inertia matrix describes two 1kg point masses at (0, 2, 1) and
    # (0, -2, -1) in the global reference frame, A.
    solidA._rel_inertia = mat([[10.0, 0.0, 0.0],
                             [0.0, 2.0, -4.0],
                             [0.0, -4.0, 8.0]])

    # If we want the inertia about a new reference frame, B, such that the
    # two masses lie on the yb axis we can rotate about xa through the angle
    # arctan(1/2). Note that this function returns R from va = R * vb.
    solidA._rot_mat = inertia.rotate_space_123((arctan(1.0 / 2.0), 0.0, 0.0))

    solidA.calc_properties()

    I_b = solidA.inertia

    expected_I_b = mat([[10.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0],
                        [0.0, 0.0, 10.0]])

    testing.assert_allclose(I_b, expected_I_b)
Example #27
def run_bd(V):
    """
    Run Bayesian decomposition.
    
    :param V: Target matrix to estimate.
    :type V: :class:`numpy.matrix`
    """
    rank = 10
    model = nimfa.mf(V, 
                  seed = "random_c", 
                  rank = rank, 
                  method = "bd", 
                  max_iter = 12, 
                  initialize_only = True,
                  alpha = np.mat(np.zeros((V.shape[0], rank))),
                  beta = np.mat(np.zeros((rank, V.shape[1]))),
                  theta = .0,
                  k = .0,
                  sigma = 1., 
                  skip = 100,
                  stride = 1,
                  n_w = np.mat(np.zeros((rank, 1))),
                  n_h = np.mat(np.zeros((rank, 1))),
                  n_sigma = False)
    fit = nimfa.mf_run(model)
    print_info(fit)
Example #28
 def adaBoostTrainDecisionStump(self,dataArr,classLabels,numInt=40):
     weakDecisionStumpArr = []
     m = np.shape(dataArr)[0]
     weight = np.mat(np.ones((m,1))/m)     # initialize the data weights. Normally, each initial weight is 1/n
     aggressionClassEst = np.mat(np.zeros((m,1)))
     for i in range(numInt): # classEst == class estimation
         bestStump,error,classEst = self.buildStump(dataArr,classLabels,weight) # D is a vector of the data's weight
         # print("D: ",weight.T)
         alpha = float(0.5 * np.log((1.0 - error)/max(error , 1e-16)))   # alpha is the weight of this weak classifier
         bestStump['alpha'] = alpha
         weakDecisionStumpArr.append(bestStump)
         exponent = np.multiply(-1* alpha * np.mat(classLabels).T , classEst) # calculate the exponent [- alpha * Y * Gm(X)]
         print("classEst :",classEst.T)
         weight = np.multiply(weight,np.exp(exponent)) # update the data weights, w_m = e^[- alpha * Y * Gm(X)]
         weight = weight/weight.sum()  # weight.sum() == Z_m (normalization factor), which ensures D_(m+1) is a probability distribution
         # give every estimated class vector (the classified result of the weak classifier) a weight
         aggressionClassEst += alpha*classEst
         print("aggression classEst: ",aggressionClassEst.T)
         # aggressionClassError = np.multiply(np.sign(aggressionClassEst) != np.mat(classLabels).T, np.ones((m,1)))
         # errorRate = aggressionClassError.sum()/m
         errorRate = (np.sign(aggressionClassEst) != np.mat(classLabels).T).sum()/m # calculate the misclassification rate
         # errorRate = np.dot((np.sign(aggressionClassEst) != np.mat(classLabels).T).T,np.ones((m,1)))/m
         print("total error: ",errorRate,"\n")
         if errorRate == 0:
             break
     return weakDecisionStumpArr
Example #29
    def __init__(self,name,updateRateHz,messagingbus,sendmessagesto):

        print "Instantiating Force & Moment Test Model ",name

        # Call superclass constructor
        Model.__init__(self,name,updateRateHz,messagingbus,sendmessagesto)


        # Member variables ----------------------------------------------------
        #   Inputs

        self.timeOn                 = 0.0
        self.timeOff                = 0.0
        self.forceStationInput      = mat('0.0;0.0;0.0')
        self.momentStationInput     = mat('0.0;0.0;0.0')

        self.forceStation           = mat('0.0;0.0;0.0')
        self.momentStation          = mat('0.0;0.0;0.0')

        # Register Input Parameters -------------------------------------------
        #                       Input Parameter Name,   Member Variable Name,       Example of Type
        self.registerInputParam('forceStation',        'forceStationInput',              self.forceStationInput )
        self.registerInputParam('momentStation',       'momentStationInput',             self.momentStationInput)
        self.registerInputParam('timeOn',             'timeOn',                     self.timeOn)
        self.registerInputParam('timeOff',     'timeOff',                           self.timeOff)
Example #30
def custom_convergence_check(x, dx, residuum, er, ea, eresiduum, vector_norm=lambda v: abs(v), debug=False):
    all_check_results = []
    if not hasattr(x, 'shape'):
        x = numpy.mat(numpy.array(x))
        dx = numpy.mat(numpy.array(dx))
        residuum = numpy.mat(numpy.array(residuum))
    if x.shape[0]:
        if not debug:
            ret = numpy.allclose(x, x + dx, rtol=er, atol=ea) and \
                numpy.allclose(residuum, numpy.zeros(
                               residuum.shape), atol=eresiduum, rtol=0)
        else:
            for i in range(x.shape[0]):
                if vector_norm(dx[i, 0]) < er * vector_norm(x[i, 0]) + ea and vector_norm(residuum[i, 0]) < eresiduum:
                    all_check_results.append(True)
                else:
                    all_check_results.append(False)
                if not all_check_results[-1]:
                    break

            ret = not (False in all_check_results)
    else:
        # We get here when there is no variable to be checked, because there
        # are no variables of this type. E.g. the circuit has no voltage
        # sources nor voltage-defined elements. In that case the actual check
        # is done only by current_convergence_check; voltage_convergence_check
        # always returns True.
        ret = True

    return ret, all_check_results
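A minimal usage sketch (mine): a solution vector that has stopped moving and has negligible residuum should pass the check.

x = [1.0, 2.0]
dx = [1e-12, 1e-12]
residuum = [0.0, 0.0]
ok, details = custom_convergence_check(x, dx, residuum, er=1e-6, ea=1e-9, eresiduum=1e-9)
print(ok)  # True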
Example #31
    def __init__(self, feed_dict, full_analysis):
        # initialize common variables
        self.returns = feed_dict['input']
        self.covar = self.returns.cov()
        del feed_dict['input']

        columns = []
        self.inital_weights = []
        for col in self.returns.columns:
            name = col.split('_')[0]
            self.inital_weights.append(get_quote_yahoo(name).marketCap)
            columns.append(name)
        self.inital_weights = np.array(self.inital_weights)
        self.inital_weights = self.inital_weights / np.sum(self.inital_weights)

        # save parameters in case replication is desired
        self.Parameters = deepcopy(feed_dict)
        objective, features, constraints, simulations, methods, tol, maxiter, disp = oc.unfeeder(
            feed_dict)

        try:
            views = []
            chg = []
            for view in objective['target']:
                chg.append(view[0] / 100)
                views.append(view[1:])
            views = np.array(views)
            chg = np.array(chg)
        except:
            raise AttributeError(
                'Black Litterman requires target views to be specified.')

        # features
        if 'benchmark' in features:
            self.benchmark = features['benchmark']
        else:
            raise AttributeError('Black Litterman requires a benchmark.')

        self.periodicity = _embed_periodicity(features['periodicity'])
        if methods == 'arithmetic':
            self.mean_returns = self.returns.mean() * self.periodicity
            self.benchmark_mean = self.benchmark.mean() * self.periodicity
        if methods == 'geometric':
            self.mean_returns = _geomean(self.returns, self.periodicity)
            self.benchmark_mean = _geomean(self.benchmark, self.periodicity)

        if 'margin_int_rate' in features:
            self.margin_rate = features['margin_int_rate']
        else:
            self.margin_rate = 0

        if 'rf' in features:
            self.rf = features['rf']
        else:
            self.rf = 0
        # constraints
        if 'min_weights' in constraints:
            self.weight_bounds = tuple(
                zip(constraints['min_weights'], constraints['max_weights']))
        else:
            self.weight_bounds = None
        constraints = self.get_constraints(constraints)

        # return computation
        A = (self.benchmark_mean - self.rf) / (
            (self.benchmark.std()**2) * self.periodicity)

        pi = A * np.matmul(np.mat(self.covar * self.periodicity),
                           self.inital_weights)
        omega = np.matmul(
            np.matmul(views, np.mat(self.covar * self.periodicity)), views.T)

        e1 = np.mat(
            np.mat(self.covar * self.periodicity).I +
            np.matmul(np.matmul(views.T,
                                np.mat(omega).I), views)).I
        e2 = np.matmul(np.mat(self.covar * self.periodicity).I,
                       pi) + np.matmul(np.matmul(views.T,
                                                 np.mat(omega).I), chg).T
        self.Expected_Returns = np.matmul(e1, e2)

        # optimal portfolio
        self.OptiParam = minimize(self.optimize_sharpe_ratio,
                                  self.inital_weights,
                                  method='SLSQP',
                                  bounds=self.weight_bounds,
                                  tol=tol,
                                  constraints=constraints,
                                  options={
                                      'maxiter': maxiter,
                                      'disp': disp
                                  })
        if not self.OptiParam['success']:
            print('Warning!', self.OptiParam['message'])
        tanw = self.OptiParam['x']
        tanww = pd.DataFrame(tanw).T
        tanww.columns = columns
        tanr = self.weighted_annual_return(tanw)
        margin = np.sum(np.abs(tanw)) - 1
        tanra = tanr - self.rf - self.margin_rate * margin
        tans = self.weighted_annual_stddev(tanw)
        self.Optimal = {
            'Weights': tanww,
            'Return': tanr,
            'AdjReturn': tanra,
            'StdDev': tans,
            'Sharpe': tanra / tans,
            'Series': self.weighted_return(tanw)
        }

        # efficient frontier
        if full_analysis:
            analyze = np.linspace(max(self.Optimal['Return'] / 4, 0.0001),
                                  min(self.Optimal['Return'] * 4, 0.5),
                                  simulations)
            efficent = []
            eff = []
            for ret in analyze:
                e = self.optimize_efficent(ret, constraints, tol, maxiter,
                                           disp)
                we = e['x']
                wew = pd.DataFrame(we).T
                wew.columns = columns
                margin = np.sum(np.abs(we)) - 1
                re = self.weighted_annual_return(
                    we) - self.rf - self.margin_rate * margin
                se = self.weighted_annual_stddev(we)
                efficent.append({
                    'Weights': wew,
                    'Returns': re,
                    'StdDevs': se,
                    'Sharpe': re / se
                })
                eff.append(e)
            self.EfficentParam = eff
            self.EfficentFrontier = efficent

        # close
        self.ConstituentSharpe = self.get_sharpes(columns)
Example #32
import numpy as np

num = 100

a = np.mat(np.arange(1, num + 1))
b = np.transpose(a)
c = a * b
d = np.sum(a)**2
print c
print d
print d - c
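The same difference in closed form (a hand check of mine): sum(1..n) is n*(n+1)/2 and the sum of squares is n*(n+1)*(2n+1)/6, so for n = 100 the script above prints 5050**2 - 338350 = 25164150.

n = 100
print((n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6)  # 25164150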
Example #33
for i in range(X.shape[0]):
    for j in range(X.shape[1]):
        for k in range(Y.shape[0]):
            for m in range(Y.shape[2]):
                R2[i, j, k, m] = sum(X[i, j, :] * Y[k, :, m])
print(np.array_equal(R, R2))

print('')
print('------------- Part 6 ---------------')
A = np.array([[1, 2, 3], [2, 2, 2], [3, 3, 3]])

B = np.array([[3, 2, 1], [1, 2, 3], [-1, -2, -3]])
R = A * B
print(R)

MA = np.mat(A)
MB = np.mat(B)
R = MA * MB
print(R)

print('')
print('------------ Part 7 ---------------')
A = np.array([[11, 12, 13], [21, 22, 23], [31, 32, 33]])
B = np.array([[11, 102, 13], [201, 22, 203], [31, 32, 303]])
print(np.array_equal(A, B))
print(np.array_equal(A, A))

print('')
print('------------- Part 8 --------------')
a = np.array([[True, True], [False, False]])
b = np.array([[True, False], [True, False]])
Example #34
def ols_high_d_category(data_df, formula=None, robust=False, c_method='cgm', psdef=True, epsilon=1e-8, max_iter=1e6,
                        debug=False):
    """

    :param data_df: Dataframe of relevant data
    :type data_df: pd.DataFrame

    :param formula: Formula takes the form of dependent_variable~continuous_variable|fixed_effect|clusters
    :type formula: str

    :param robust: bool value of whether to get a robust variance
    :type robust: bool

    :param c_method: method used to calculate multi-way clusters variance. Possible choices are:
            - 'cgm'
            - 'cgm2'
    :type c_method: str

    :param psdef: if True, replace negative eigenvalues of the variance matrix with 0 (only in multi-way clusters variance)
    :type psdef: bool

    :param epsilon: tolerance of the demean process
    :type epsilon: float

    :param max_iter: max iteration of the demean process
    :type max_iter: float

    :param debug: If true then print all individual stage prints, defaults to false.
    :type debug: bool

    :return:params,df,bse,tvalues,pvalues,rsquared,rsquared_adj,fvalue,f_pvalue,variance_matrix,fittedvalues,resid,summary

    Example
    -------
    y~x+x2|id+firm|id

    """
    total_start = time.time()

    out_col, consist_col, category_col, cluster_col = formula_transform(formula)

    consist_var = []
    if len(category_col) == 0 or len(consist_col) == 0:
        demeaned_df = data_df.copy()
        const_consist = sm.add_constant(demeaned_df[consist_col])
        consist_col = ['const'] + consist_col
        demeaned_df['const'] = const_consist['const']
        rank = 0
    else:
        for i in consist_col:
            consist_var.append(i)
        consist_var.append(out_col[0])
        start = time.time()
        demeaned_df = demean_dataframe(data_df, consist_var, category_col, epsilon, max_iter)
        end = time.time()
        start = time.process_time()
        rank = cal_df(data_df, category_col)
        end = time.process_time()

    model = sm.OLS(demeaned_df[out_col], demeaned_df[consist_col])
    result = model.fit()
    demeaned_df['resid'] = result.resid
    n = demeaned_df.shape[0]
    k = len(consist_col)
    f_result = OLSFixed()
    f_result.out_col = out_col
    f_result.consist_col = consist_col
    f_result.category_col = category_col
    f_result.data_df = data_df.copy()
    f_result.demeaned_df = demeaned_df
    f_result.params = result.params
    f_result.df = result.df_resid - rank

    # Now we need to update the standard errors of the OLS based on robust and clustering
    if (len(cluster_col) == 0) & (robust is False):
        std_error = result.bse * np.sqrt((n - k) / (n - k - rank))
        covariance_matrix = result.normalized_cov_params * result.scale * result.df_resid / f_result.df
    elif (len(cluster_col) == 0) & (robust is True):
        covariance_matrix = robust_err(demeaned_df, consist_col, n, k, rank)
        std_error = np.sqrt(np.diag(covariance_matrix))
    else:
        if category_col[0] == '0':
            nested = False
        else:
            nested = is_nested(demeaned_df, category_col, cluster_col, consist_col)

        covariance_matrix = clustered_error(demeaned_df, consist_col, cluster_col, n, k, rank, nested=nested,
                                            c_method=c_method, psdef=psdef)
        std_error = np.sqrt(np.diag(covariance_matrix))

    f_result.bse = std_error
    # print(f_result.bse)
    f_result.variance_matrix = covariance_matrix
    f_result.tvalues = f_result.params / f_result.bse
    f_result.pvalues = pd.Series(2 * t.sf(np.abs(f_result.tvalues), f_result.df), index=list(result.params.index))

    f_result.rsquared = result.rsquared
    f_result.rsquared_adj = 1 - (len(data_df) - 1) / (result.df_resid - rank) * (1 - result.rsquared)
    tmp1 = np.linalg.solve(f_result.variance_matrix, np.mat(f_result.params).T)
    tmp2 = np.dot(np.mat(f_result.params), tmp1)
    f_result.fvalue = tmp2[0, 0] / result.df_model
    if len(cluster_col) > 0 and c_method == 'cgm':
        f_result.f_pvalue = f.sf(f_result.fvalue, result.df_model,
                                 min(min_clust(data_df, cluster_col) - 1, f_result.df))
        f_result.f_df_proj = [result.df_model, (min(min_clust(data_df, cluster_col) - 1, f_result.df))]
    else:
        f_result.f_pvalue = f.sf(f_result.fvalue, result.df_model, f_result.df)
        f_result.f_df_proj = [result.df_model, f_result.df]

    # std err=diag( np.sqrt(result.normalized_cov_params*result.scale*result.df_resid/f_result.df) )
    f_result.fittedvalues = result.fittedvalues
    f_result.resid = result.resid
    f_result.full_rsquared, f_result.full_rsquared_adj, f_result.full_fvalue, f_result.full_f_pvalue, f_result.f_df_full\
        = cal_fullmodel(data_df, out_col, consist_col, rank, RSS=sum(result.resid ** 2))
    f_result.nobs = result.nobs
    f_result.yname = out_col
    f_result.xname = consist_col
    f_result.resid_std_err = np.sqrt(sum(result.resid ** 2) / (result.df_resid - rank))
    if len(cluster_col) == 0:
        f_result.cluster_method = 'no_cluster'
        if robust:
            f_result.Covariance_Type = 'robust'
        else:
            f_result.Covariance_Type = 'nonrobust'
    else:
        f_result.cluster_method = c_method
        f_result.Covariance_Type = 'clustered'

    end = time.time()
    if debug:
        print(f"Total {end - total_start}")
    return f_result  # , demeaned_df
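A hypothetical call (mine; the DataFrame df and the columns y, x, x2, id, firm mirror the docstring's formula example and are not defined here):

result = ols_high_d_category(df, formula='y~x+x2|id+firm|id')
print(result.params)  # coefficient estimates
print(result.bse)     # cluster- or robust-adjusted standard errors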
Example #35
    def _sample_cze(self):
        # Chinese Restaurant Process
        for i in range(self._N):
            old_r = self.c[i]
            if old_r != -1:
                if self.n[old_r] == 1:
                    del self.n[old_r]
                    del self.nu[old_r]
                    del self.kappa[old_r]
                    del self.phi[old_r]
                else:
                    self.n[old_r] -= 1
            like_ref, like_sym_s = [], []
            classes = {}
            _r = 0

            x = self._X[i]
            sent = self._Sent[i]
            index = self._Index[i].astype(int)
            like_dis = st.norm.pdf(x[:, 2], self.mu, np.sqrt(1. / self.lamda))
            prob_dis = like_dis / like_dis.sum()
            ang = np.arctan2(x[:, 1], x[:, 0])
            like_sym_o = np.mat(self.psi[index][:, sent])

            for r in self.n:
                if r == self.new_class:
                    nu = self._init_nu()
                    kappa = self._init_kappa()
                    phi = self._init_phi()  # concept word prob
                    pi = self.alpha
                    new_nu = copy.copy(nu)
                    new_kappa = copy.copy(kappa)
                    new_phi = copy.copy(phi)
                else:
                    nu = self.nu[r]
                    phi = self.phi[r]
                    kappa = self.kappa[r]
                    pi = self.n[r]
                like_ref.append(st.vonmises.pdf(ang, kappa, nu) * pi)
                like_sym_s.append(phi[sent])
                classes[_r] = r
                _r += 1

            R = len(classes)
            K_n = len(x)
            D_n = len(sent)

            like_ref = np.array(like_ref) * prob_dis
            prob_ref = like_ref / like_ref.sum()
            like_sym_s = np.array(like_sym_s)
            prob_sym_s = like_sym_s / like_sym_s.sum()
            like_sym_o = np.array(like_sym_o)
            prob_sym_o = like_sym_o / like_sym_o.sum()

            # the joint probability between concept, referent and symbol: P_r,k * P_r,d * P_k,_d
            like_rkd_d = np.array([[[[
                prob_ref[r, k] * prob_sym_s[r, d] *
                prob_sym_o[k, _d] if _d != d else 0 for _d in range(D_n)
            ] for d in range(D_n)] for k in range(K_n)] for r in range(R)])

            prob_rkd_d = like_rkd_d / like_rkd_d.sum()
            prob_rkd_d = prob_rkd_d.reshape(R * K_n * D_n * D_n)
            C_rkd_d = rd.multinomial(1, prob_rkd_d).reshape((R, K_n, D_n, D_n))
            # concept
            new_r = classes[np.where(C_rkd_d == 1)[0][0]]
            # reference
            new_k = np.where(C_rkd_d == 1)[1][0]
            # concept word
            new_d_s = sent[np.where(C_rkd_d == 1)[2][0]]
            # reference word
            new_d_o = sent[np.where(C_rkd_d == 1)[3][0]]

            self.c[i] = new_r
            self.z[i] = new_k
            self.e[i] = [new_d_s, new_d_o]
            self.n[new_r] += 1

            if new_r == self.new_class:
                self.nu[self.new_class] = new_nu
                self.kappa[self.new_class] = new_kappa
                self.phi[self.new_class] = new_phi
                j = 0
                while True:
                    if not j in self.n:
                        self.n[j] = 0
                        self.new_class = j
                        break
                    j += 1
Example #36
def getsymbol(modulationType):
    "GETALPHABET    Generate set of alphabet according to modulation type."
    # Create basic mapping of signal symbols
    if modulationType == '2pam':
        symbolMap = np.mat('1, -1')
    elif modulationType == '4pam':
        symbolMap = np.mat('-3, -1, 1, 3')
    elif modulationType == '8pam':
        symbolMap = np.mat('-7, -5, -3, -1, 1, 3, 5, 7')
    elif modulationType == '2psk':
        symbolMap = np.mat('1j, -1j')
    elif modulationType == '4psk':
        symbolMap = np.mat('1, 1j, -1, -1j')
    elif modulationType == '8psk':
        # note: np.mat's string parser cannot evaluate np.sqrt, so this
        # constellation is built from a list instead of a string
        symbolMap = np.mat([np.sqrt(2), 1+1j, np.sqrt(2)*1j, -1+1j,
                            -np.sqrt(2), -1-1j, -np.sqrt(2)*1j, 1-1j])
    elif modulationType == '4qam':
        symbolMap = np.mat('1+1j, -1+1j, -1-1j, 1-1j')
    elif modulationType == '16qam':
        symbolMap = np.mat('3+3j, 3+1j, 3-1j, 3-3j, 1+3j, 1+1j, 1-1j, 1-3j,\
                -1+3j, -1+1j, -1-1j, -1-3j, -3+3j, -3+1j, -3-1j, -3-3j')
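    # (note, not from the source: despite the '64pam' key, the entries in
    # the next branch form a 64-QAM constellation)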
    elif modulationType == '64pam':
        symbolMap = np.mat('1+1j, 3+1j, 1+3j, 3+3j, 7+1j, 5+1j, 7+3j,\
                5+3j, 1+7j, 3+7j, 1+5j, 3+5j, 7+7j, 5+7j, 7+5j, 5+5j, 1-1j,\
                1-3j, 3-1j, 3-3j, 1-7j, 1-5j, 3-7j, 3-5j, 7-1j, 7-3j, 5-1j,\
                5-3j, 7-7j, 7-5j, 5-7j, 5-5j, -1+1j, -1+3j, -3+1j, -3+3j,\
                -1+7j, -1+5j, -3+7j, -3+5j, -7+1j, -7+3j, -5+1j, -5+3j,\
                -7+7j, -7+5j, -5+7j, -5+5j, -1-1j, -3-1j, -1-3j, -3-3j,\
                -7-1j, -5-1j, -7-3j, -5-3j, -1-7j, -3-7j, -1-5j, -3-5j,\
                -7-7j, -5-7j, -7-5j, -5-5j')
    elif modulationType == '256qam':
        symbolMap = np.mat('1+1j, 1+3j, 1+5j, 1+7j, 1+9j, 1+11j, 1+13j,\
                1+15j, 1-1j, 1-3j, 1-5j, 1-7j, 1-9j, 1-11j, 1-13j, 1-15j,\
                3+1j, 3+3j, 3+5j, 3+7j, 3+9j, 3+11j, 3+13j, 3+15j, 3-1j,\
                3-3j, 3-5j, 3-7j, 3-9j, 3-11j, 3-13j, 3-15j, 5+1j, 5+3j,\
                5+5j, 5+7j, 5+9j, 5+11j, 5+13j, 5+15j, 5-1j, 5-3j, 5-5j,\
                5-7j, 5-9j, 5-11j, 5-13j, 5-15j, 7+1j, 7+3j, 7+5j, 7+7j,\
                7+9j, 7+11j, 7+13j, 7+15j, 7-1j, 7-3j, 7-5j, 7-7j, 7-9j,\
                7-11j, 7-13j, 7-15j, 9+1j, 9+3j, 9+5j, 9+7j, 9+9j, 9+11j,\
                9+13j, 9+15j, 9-1j, 9-3j, 9-5j, 9-7j, 9-9j, 9-11j, 9-13j,\
                9-15j, 11+1j, 11+3j, 11+5j, 11+7j, 11+9j, 11+11j, 11+13j,\
                11+15j, 11-1j, 11-3j, 11-5j, 11-7j, 11-9j, 11-11j, 11-13j,\
                11-15j, 13+1j, 13+3j, 13+5j, 13+7j, 13+9j, 13+11j, 13+13j,\
                13+15j, 13-1j, 13-3j, 13-5j, 13-7j, 13-9j, 13-11j, 13-13j,\
                13-15j, 15+1j, 15+3j, 15+5j, 15+7j, 15+9j, 15+11j, 15+13j,\
                15+15j, 15-1j, 15-3j, 15-5j, 15-7j, 15-9j, 15-11j, 15-13j,\
                15-15j, -1+1j, -1+3j, -1+5j, -1+7j, -1+9j, -1+11j, -1+13j,\
                -1+15j, -1-1j, -1-3j, -1-5j, -1-7j, -1-9j, -1-11j, -1-13j,\
                -1-15j, -3+1j, -3+3j, -3+5j, -3+7j, -3+9j, -3+11j, -3+13j,\
                -3+15j, -3-1j, -3-3j, -3-5j, -3-7j, -3-9j, -3-11j, -3-13j,\
                -3-15j, -5+1j, -5+3j, -5+5j, -5+7j, -5+9j, -5+11j, -5+13j,\
                -5+15j, -5-1j, -5-3j, -5-5j, -5-7j, -5-9j, -5-11j, -5-13j,\
                -5-15j, -7+1j, -7+3j, -7+5j, -7+7j, -7+9j, -7+11j, -7+13j,\
                -7+15j, -7-1j, -7-3j, -7-5j, -7-7j, -7-9j, -7-11j, -7-13j,\
                -7-15j, -9+1j, -9+3j, -9+5j, -9+7j, -9+9j, -9+11j, -9+13j,\
                -9+15j, -9-1j, -9-3j, -9-5j, -9-7j, -9-9j, -9-11j, -9-13j,\
                -9-15j, -11+1j, -11+3j, -11+5j, -11+7j, -11+9j, -11+11j,\
                -11+13j, -11+15j, -11-1j, -11-3j, -11-5j, -11-7j, -11-9j,\
                -11-11j, -11-13j, -11-15j, -13+1j, -13+3j, -13+5j, -13+7j,\
                -13+9j, -13+11j, -13+13j, -13+15j, -13-1j, -13-3j, -13-5j,\
                -13-7j, -13-9j, -13-11j, -13-13j, -13-15j, -15+1j, -15+3j,\
                -15+5j, -15+7j, -15+9j, -15+11j, -15+13j, -15+15j, -15-1j,\
                -15-3j, -15-5j, -15-7j, -15-9j, -15-11j, -15-13j, -15-15j')

    symbolMap = symbolMap / np.sqrt(np.mean(np.power(np.abs(symbolMap), 2)))
    return symbolMap
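A small usage sketch (mine): after the final normalization, every constellation returned above should have unit average symbol energy.

import numpy as np

for mod in ('2pam', '4pam', '4psk', '16qam'):
    s = getsymbol(mod)
    print(mod, np.mean(np.power(np.abs(s), 2)))  # approximately 1.0 for each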
Example #37
#!/usr/bin/python

import numpy

A = numpy.mat("3 4;5 6")
print "A\n", A

print "Determinant", numpy.linalg.det(A)
Example #38
 def test_index(self):
     """Test the get_coefficients function for index.
     """
     size = (5, 4)
     # Eye
     key = (slice(0, 2, None), slice(0, 2, None))
     x = create_var(size)
     expr = index(x, (2, 2), key)
     coeffs = get_coefficients(expr)
     assert len(coeffs) == 1
     id_, mat = coeffs[0]
     self.assertEqual(id_, x.data)
     self.assertEqual(mat.shape, (4, 20))
     test_mat = np.mat(range(20)).T
     self.assertItemsAlmostEqual((mat * test_mat).reshape((2, 2),
                                                          order='F'),
                                 test_mat.reshape(size, order='F')[key])
     # Eye with scalar mult.
     key = (slice(0, 2, None), slice(0, 2, None))
     x = create_var(size)
     A = create_const(5, (1, 1))
     expr = mul_expr(A, x, size)
     expr = index(expr, (2, 2), key)
     coeffs = get_coefficients(expr)
     assert len(coeffs) == 1
     id_, mat = coeffs[0]
     test_mat = np.mat(range(20)).T
     self.assertItemsAlmostEqual((mat * test_mat).reshape(
         (2, 2), order='F'), 5 * test_mat.reshape(size, order='F')[key])
     # Promoted
     key = (slice(0, 2, None), slice(0, 2, None))
     x = create_var((1, 1))
     value = np.array(range(20)).reshape(size)
     A = create_const(value, size)
     prom_x = promote(x, (size[1], 1))
     expr = mul_expr(A, diag_vec(prom_x), size)
     expr = index(expr, (2, 2), key)
     coeffs = get_coefficients(expr)
     assert len(coeffs) == 1
     id_, mat = coeffs[0]
     self.assertEqual(mat.shape, (4, 1))
     self.assertItemsAlmostEqual(mat, value[key])
     # Normal
     size = (5, 5)
     key = (slice(0, 2, None), slice(0, 1, None))
     x = create_var((5, 1))
     A = create_const(np.ones(size), size)
     expr = mul_expr(A, x, (5, 1))
     expr = index(expr, (2, 1), key)
     coeffs = get_coefficients(expr)
     assert len(coeffs) == 1
     id_, mat = coeffs[0]
     self.assertEqual(mat.shape, (2, 5))
     self.assertItemsAlmostEqual(mat.todense(), A.data[slice(0, 2, None)])
     # Blocks
     size = (5, 5)
     key = (slice(0, 2, None), slice(0, 2, None))
     x = create_var(size)
     value = np.array(range(25)).reshape(size)
     A = create_const(value, size)
     expr = mul_expr(A, x, size)
     expr = index(expr, (2, 2), key)
     coeffs = get_coefficients(expr)
     assert len(coeffs) == 1
     id_, mat = coeffs[0]
     self.assertEqual(mat.shape, (4, 25))
     test_mat = np.mat(range(25)).T
     self.assertItemsAlmostEqual(
         (mat * test_mat).reshape((2, 2), order='F'),
         (A.data * test_mat.reshape(size, order='F'))[key])
     # Scalar constant
     size = (1, 1)
     A = create_const(5, size)
     key = (slice(0, 1, None), slice(0, 1, None))
     expr = index(A, (1, 1), key)
     coeffs = get_coefficients(expr)
     assert len(coeffs) == 1
     id_, mat = coeffs[0]
     self.assertEqual(intf.size(mat), (1, 1))
     self.assertEqual(mat, 5)
     # Dense constant
     size = (5, 4)
     key = (slice(0, 2, None), slice(0, 1, None))
     value = np.array(range(20)).reshape(size)
     A = create_const(value, size)
     expr = index(A, (2, 1), key)
     coeffs = get_coefficients(expr)
     assert len(coeffs) == 1
     id_, mat = coeffs[0]
     self.assertEqual(mat.shape, (2, 1))
     self.assertItemsAlmostEqual(mat, value[key])
     # Sparse constant
     size = (5, 5)
     key = (slice(0, 2, None), slice(0, 1, None))
     A = create_const(sp.eye(5), size)
     expr = index(A, (2, 1), key)
     coeffs = get_coefficients(expr)
     assert len(coeffs) == 1
     id_, mat = coeffs[0]
     self.assertEqual(mat.shape, (2, 1))
     self.assertItemsAlmostEqual(mat, sp.eye(5).todense()[key])
     # Parameter
     size = (5, 4)
     key = (slice(0, 2, None), slice(0, 1, None))
     param = Parameter(*size)
     value = np.array(range(20)).reshape(size)
     param.value = value
     A = create_param(param, size)
     expr = index(A, (2, 1), key)
     coeffs = get_coefficients(expr)
     assert len(coeffs) == 1
     id_, mat = coeffs[0]
     self.assertEqual(mat.shape, (2, 1))
     self.assertItemsAlmostEqual(mat, param.value[key])
Example #39
def Initial():
    QFactorArr = 1
    F = np.mat([[1, 0.1], [0, 1]])
    H = np.mat([1, 0])
    QReal = np.mat([[0.01, 0.1], [0.1, 1]])
    R = 10
    x0 = np.mat([[1], [1]])
    N = 500
    nMonte = 2
    #    F=np.mat(ast.literal_eval(str_F.get()))
    #    H=np.mat(ast.literal_eval(str_H.get()))
    #    QReal=np.mat(ast.literal_eval(str_Q.get()))
    #    R=ast.literal_eval(str_R.get())
    #    x0=np.mat(ast.literal_eval(str_x0.get()))
    #    N=ast.literal_eval(str_N.get())

    nx = max(x0.shape)
    Qfilter = QFactorArr * QReal
    Rfilter = 1**2
    x0filter = np.mat([[1], [1]])
    P0filter = 1000 * np.mat(np.eye(nx))
    Nopt = 12
    #    Qfilter=np.mat(ast.literal_eval(str_Qfilter.get()))
    #    Rfilter=ast.literal_eval(str_Rfilter.get())
    #    x0filter=np.mat(ast.literal_eval(str_x0filter.get()))
    #    P0filter=np.mat(ast.literal_eval(str_P0filter.get()))
    #    Nopt=ast.literal_eval(str_Nopt.get())

    #    unF=np.mat(ast.literal_eval(str_unF.get()))
    #    unQ=np.mat(ast.literal_eval(str_unQ.get()))
    #    start=ast.literal_eval(str_start.get())
    #    end=ast.literal_eval(str_end.get())
    unFflag = chVarunF.get()
    unQflag = chVarnoise.get()
    print(unFflag, unQflag)
    # Default to the true model, then override F or Q according to the flags,
    # so unF/unQ are defined for every flag combination.
    start = 150
    end = 300
    unF = F
    unQ = QReal
    if unFflag == 1 and unQflag == 0:
        unF = np.mat([[1, 1], [0, 1]])
    elif unQflag == 1 and unFflag == 0:
        unQ = 50 * QReal

    systemModel = SystemModel(F, H, QReal, R, x0, N)
    filterModel = FilterModel(Qfilter, Rfilter, x0filter, P0filter, Nopt)
    unsystemModel = unSystemModel(unF, unQ, start, end, unFflag, unQflag)

    #    nx = max(F.shape)
    #    ny = min(H.shape)
    #    xArr=np.mat(np.zeros((nx, N)))
    #    yArr=np.mat(np.zeros((ny, N)))
    #    for k in range(0,N):
    #        x = F * x + np.sqrt(QReal) * np.random.randn(nx, 1)
    #        y = H * x + np.sqrt(R) * np.random.randn()
    #        xArr[:, k] = x
    #        yArr[:, k] = y
    return systemModel, filterModel, unsystemModel, nMonte
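
A minimal driver sketch for this example (assumes the SystemModel, FilterModel and unSystemModel classes and the Tkinter check-button variables chVarunF / chVarnoise used above are defined elsewhere in the program):

# Hypothetical usage: build the models once, then run each Monte Carlo
# trial through the Kalman() routine shown in the next example.
systemModel, filterModel, unsystemModel, nMonte = Initial()
# xArr, yArr, *rest = Kalman(systemModel, filterModel, unsystemModel, smoothingFlag=True)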
Example #40
def Kalman(systemModel, filterModel, unsystemModel, smoothingFlag):
    F = systemModel.F
    H = systemModel.H
    QFilter = filterModel.Qfilter
    QReal = systemModel.QReal
    R = systemModel.R
    x = systemModel.x0
    N = systemModel.N
    nx = max(x.shape)
    ny = min(H.shape)
    Pplus = filterModel.P0filter
    xhatplus = filterModel.x0filter

    start_time = unsystemModel.start
    end_time = unsystemModel.end
    unF = unsystemModel.unF
    unQ = unsystemModel.unQ
    unFflag = unsystemModel.unFflag
    unQflag = unsystemModel.unQflag

    xArr = np.mat(np.zeros((nx, N)))
    PplusArr = np.zeros((nx, nx, N))
    PminusArr = np.zeros((nx, nx, N))
    xhatminusArr = np.mat(np.zeros((nx, N)))
    xhatplusArr = np.mat(np.zeros((nx, N)))
    xhatplusError = np.mat(np.zeros((nx, N)))
    PKalmanElements = np.mat(np.zeros((nx, N)))
    JArr = np.zeros((nx, nx, N))
    JInvArr = np.zeros((nx, nx, N))
    KArr = np.mat(np.zeros((nx, N)))
    yArr = np.mat(np.zeros((ny, N)))
    FArr = np.zeros((nx, nx, N))
    HArr = np.mat(np.zeros((N, nx)))
    SArr = np.mat(np.zeros((ny, N)))
    MeasResidual = np.mat(np.zeros((ny, N)))
    Inx = np.mat(np.eye(nx))

    #    print(unF, unQ, start_time, end_time, unFflag, unQflag)
    for k in range(0, N):
        FArr[:, :, k] = F
        HArr[k, :] = H

        # The mismatch window and covariance are hardcoded here; the commented
        # variant below would use the configured start_time/end_time and unQ.
        if 150 <= k <= 300:
            x = unF * x + np.sqrt(np.mat([[0.01, 0.1], [0.1, 1]])) * np.random.randn(nx, 1)
        # if start_time <= k <= end_time:
        #     x = unF * x + np.sqrt(unQ) * np.random.randn(nx, 1)
        else:
            x = F * x + np.sqrt(QReal) * np.random.randn(nx, 1)
        y = H * x + np.sqrt(R) * np.random.randn()
        xArr[:, k] = x
        yArr[:, k] = y
        y = yArr[:, k]
        #Kalman filter
        Pminus = F * Pplus * F.T + QFilter
        K = Pminus * H.T * (H * Pminus * H.T + R).I
        xhatminus = F * xhatplus
        xhatplus = xhatminus + K * (y - H * xhatminus)
        Pplus = Pminus - K * H * Pminus

        PminusArr[:, :, k] = Pminus
        PplusArr[:, :, k] = Pplus
        PKalmanElements[0, k] = Pplus[0, 0]
        PKalmanElements[1, k] = Pplus[1, 1]
        xhatminusArr[:, k] = xhatminus
        xhatplusArr[:, k] = xhatplus
        xhatplusError[:, k] = x - xhatplus
        JArr[:, :, k] = Inx - K * H
        JInvArr[:, :, k] = (Inx - K * H).I
        KArr[:, k] = K
        SArr[:, k] = H * Pminus * H.T + R
        MeasResidual[:, k] = y - H * xhatminus

    #Kalman smoother
    xhatSmooth = xhatplus
    PSmooth = Pplus
    xhatSmoothArr = np.mat(np.zeros((nx, N)))
    xhatSmoothError = np.mat(np.zeros((nx, N)))
    xhatSmoothArr[:, N - 1] = xhatSmooth
    xhatSmoothError[:, N - 1] = xArr[:, N - 1] - xhatSmooth
    KSmootherArr = np.zeros((nx, nx, N - 1))
    PSmootherElements = np.mat(np.zeros((nx, N)))
    PSmootherElements[:, N - 1] = PKalmanElements[:, N - 1]
    if smoothingFlag:
        for k in range(N - 2, 0, -1):
            K = PplusArr[:, :, k] * F.T * np.mat(PminusArr[:, :, k + 1]).I
            PSmooth = PplusArr[:, :, k] - K * (PminusArr[:, :, k + 1] -
                                               PSmooth) * K.T
            xhatSmooth = xhatplusArr[:, k] + K * (xhatSmooth -
                                                  xhatminusArr[:, k + 1])
            # Save data in arrays
            xhatSmoothArr[:, k] = xhatSmooth
            xhatSmoothError[:, k] = xArr[:, k] - xhatSmooth
            KSmootherArr[:, :, k] = K
            PSmootherElements[0, k] = PSmooth[0, 0]
            PSmootherElements[1, k] = PSmooth[1, 1]
    return xArr, yArr, xhatplusArr, xhatplusError, xhatSmoothError, PKalmanElements, PSmootherElements, MeasResidual, PminusArr
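Example #41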
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
    The Savitzky-Golay filter removes high frequency noise from data.
    It has the advantage of preserving the original shape and
    features of the signal better than other types of filtering
    approaches, such as moving average techniques.
    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
    deriv : int
        the order of the derivative to compute (default = 0 means only smoothing)
    rate : int
        spacing factor used to scale the computed derivative (default = 1)
    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
    Notes
    -----
    The Savitzky-Golay filter is a type of low-pass filter, particularly
    suited for smoothing noisy data. The main idea behind this
    approach is to make, for each point, a least-squares fit with a
    polynomial of high order over an odd-sized window centered at
    the point.
    Examples
    --------
    t = np.linspace(-4, 4, 500)
    y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
    ysg = savitzky_golay(y, window_size=31, order=4)
    import matplotlib.pyplot as plt
    plt.plot(t, y, label='Noisy signal')
    plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
    plt.plot(t, ysg, 'r', label='Filtered signal')
    plt.legend()
    plt.show()
    References
    ----------
    .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
       Data by Simplified Least Squares Procedures. Analytical
       Chemistry, 1964, 36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
       Cambridge University Press ISBN-13: 9780521880688
    """

    try:
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # precompute coefficients
    b = np.mat([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b).A[deriv] * rate ** deriv * factorial(deriv)
    # pad the signal at the extremes with
    # values taken from the signal itself
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
Example #42
def UFIR(systemModel, filterModel, unsystemModel, smoothingFlag):
    F = systemModel.F
    H = systemModel.H
    QReal = systemModel.QReal
    R = systemModel.R
    x = systemModel.x0
    N = systemModel.N
    nx = max(x.shape)
    ny = min(H.shape)
    nOpt = filterModel.Nopt

    start_time = unsystemModel.start
    end_time = unsystemModel.end
    unF = unsystemModel.unF
    unQ = unsystemModel.unQ
    unFflag = unsystemModel.unFflag
    unQflag = unsystemModel.unQflag

    if smoothingFlag:
        p = int(nOpt / 2 + 0.5)
        iLowerLimit = max(nOpt + 1, nOpt + p - nx)
        iUpperLimit = min(N - 1, N + nOpt - nx - 1)
        NArr = np.arange(iLowerLimit, N - p, 1)
    else:
        p = 0
        iLowerLimit = nOpt + 2 - nx
        iUpperLimit = min(N - 1, N + nOpt - nx - 1)
        NArr = np.arange(nOpt + 2 - nx, iUpperLimit + 1, 1)
    xArr = np.mat(np.zeros((nx, N)))
    xhatArr = np.mat(np.zeros((nx, N)))
    xhatError = np.mat(np.zeros((nx, N)))
    PElements = np.mat(np.zeros((nx, N)))
    yArr = np.mat(np.zeros((ny, N)))
    FArr = np.zeros((nx, nx, N))
    HArr = np.mat(np.zeros((N, nx)))
    MeasResidual = np.mat(np.zeros((ny, N)))
    Inx = np.mat(np.eye(nx))
    FExpMinusP = F.I**p

    #    print(unF, unQ, start_time, end_time, unFflag, unQflag)
    for k in range(0, N):
        FArr[:, :, k] = F
        HArr[k, :] = H

        # The mismatch window and covariance are hardcoded here; the commented
        # variant below would use the configured start_time/end_time and unQ.
        if 150 <= k <= 300:
            x = unF * x + np.sqrt(np.mat([[0.01, 0.1], [0.1, 1]])) * np.random.randn(nx, 1)
        # if start_time <= k <= end_time:
        #     x = unF * x + np.sqrt(unQ) * np.random.randn(nx, 1)
        else:
            x = F * x + np.sqrt(QReal) * np.random.randn(nx, 1)
        y = H * x + np.sqrt(R) * np.random.randn()
        xArr[:, k] = x
        yArr[:, k] = y
    Ysm = np.mat(np.zeros((1, nx * ny)))
    FsmT = np.mat(np.zeros((nx, nx**2)))
    HsmBar = np.mat(np.zeros((nx * ny, nx**2)))
    for i in range(iLowerLimit, iUpperLimit + 1):
        m = i - nOpt + 1
        s = m + nx - 1
        Yindex = nx * ny
        for j in range(m, s + 1):
            Ysm[Yindex - ny + 1 - 1:Yindex] = yArr[:, j - 1].T
            Yindex = Yindex - ny
        for j in range(1, nx + 1):
            temp = Inx
            for iii in range(j - 1, nx - 1):
                temp = temp * FArr[:, :, s - iii - 1]
            FsmT[:, (j - 1) * nx + 1 - 1:j * nx] = temp.T
        for iii in range(s, m - 1, -1):
            HsmBar[(s - iii) * ny + 1 - 1:(s - iii + 1) * ny,
                   (s - iii) * nx + 1 - 1:(s - iii + 1) * nx] = HArr[s, :]
        Hsm = HsmBar * FsmT.T
        alpha = Inx
        if p >= nx:
            for iii in range(1, p - nx):
                alpha = alpha * FArr[:, :, s - p + iii - 1].I
        else:
            for iii in range(0, nx - 1 - p):
                alpha = alpha * FArr[:, :, s - p - iii - 1]
        xhat = alpha * (Hsm.T * Hsm).I * Hsm.T * Ysm.T
        TempPord = Inx
        for i1 in range(0, nx - 1):
            TempPord = TempPord * FArr[:, :, s - i1 - 1]
        G = TempPord * (Hsm.T * Hsm).I * TempPord.T
        gamma = Inx
        for i1 in range(0, p + 1):
            gamma = gamma * FArr[:, :, i - nOpt + nx + 1 - i1 - 1]
        for el in range(i - nOpt + nx + 1, i + 1):
            G = (HArr[el - 1, :].T * HArr[el - 1, :] +
                 (FArr[:, :, el - 1] * G * np.mat(FArr[:, :, el - 1]).T).I).I
            K = FArr[:, :, el - p - 1] * gamma.I * G * HArr[el - 1, :].T
            xhat = FArr[:, :, el - p - 1] * xhat + K * (
                yArr[:, el - 1] - HArr[el - 1, :] * gamma * xhat)
            if el < i:
                gamma = FArr[:, :, el + 1 - 1] * gamma * (np.mat(
                    FArr[:, :, el - p - 1]).I)
        xhatArr[:, i - p - 1] = xhat
        xhatError[:, i - p - 1] = xArr[:, i - p - 1] - xhat
        Covariance = R * FExpMinusP * G * FExpMinusP.T
        PElements[0, i - p - 1] = Covariance[0, 0]
        PElements[1, i - p - 1] = Covariance[1, 1]
        MeasResidual[:, i - p -
                     1] = yArr[:, i - p - 1] - HArr[i - p - 1, :] * xhat
    return xArr, yArr, xhatArr, xhatError, PElements, NArr, MeasResidual
Example #43
            states_index_random = get_states_index_random()
            index_7s, prob_normal_7s, directions_7s_01, max_p = cal_7directions_probability(
                states_index_random, index_current=i)
            # print("index_7s = ", index_7s)
            # print("prob_normal_7s =", prob_normal_7s)
            for j, index in enumerate(index_7s):
                if 0 <= index <= 607:
                    state_transition_matrix[i, index] = np.array(
                        prob_normal_7s[j])

        input_vector = np.zeros((1, 608))
        start_index = 353
        input_vector[0, start_index] = 1

        B = state_transition_matrix
        np.save("transition_matrix.npy", B)
        step = 200
        A = input_vector
        print("\nAfter 200 iterations ......")
        for i in tqdm(range(step)):
            B = np.dot(np.mat(B), np.mat(B))
            B /= np.max(B)

        probility_distribution_608s = np.dot(np.mat(A), np.mat(B))

        p_data = np.array(probility_distribution_608s)
        p_data = np.reshape(p_data, (32, 19))
        b = p_data * 255
        img = cv2.merge([b, b, b])
        print("\nPrior predictive distribution")
        show_probility_img3D(img, title_name="Prior predictive distribution")
Example #44
            b: the bias of the SVM model, a float scalar.
    '''
    n, p = X.shape

    #l: (lambda) = 1/ (n C), which is the weight of the L2 regularization term.
    l = 1. / (n * C)

    # Initialize the weight vector as all zeros and the bias as a float scalar
    # (the docstring above specifies b as a scalar; an empty matrix would break
    # the update steps).
    w, b = np.asmatrix(np.zeros((p, 1))), 0.0
    for _ in xrange(n_epoch):
        for i in xrange(n):
            x = X[i].T  # get the i-th instance in the dataset
            y = float(Y[i])
            #########################################
            ## INSERT YOUR CODE HERE
            dL_dw, dL_db = gradient(x, y, w, b, l)

            w = update_w(w, dL_dw, lr)
            b = update_b(b, dL_db, lr)
            #########################################
    return w, b


x = np.mat('1,1;2,1;1,2')
y = np.mat('4;6;5')

w, b = train(x, y)

print w
print b
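
The gradient and update helpers called inside train() are not part of this listing; a minimal sketch consistent with the L2-regularized hinge loss set up above (the names, signatures, and the lr learning-rate parameter are assumptions):

import numpy as np

def gradient(x, y, w, b, l):
    # Subgradient of l/2 * ||w||^2 + max(0, 1 - y * (w.T x + b)) at one sample.
    if y * float(w.T * x + b) < 1:
        return l * w - y * x, -y
    return l * w, 0.0

def update_w(w, dL_dw, lr):
    # Gradient-descent step on the weight vector.
    return w - lr * dL_dw

def update_b(b, dL_db, lr):
    # Gradient-descent step on the bias scalar.
    return b - lr * dL_db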
Example #45
    def __init__(self, arm_name='MTMR'):
        self._num_rows = 6
        self._num_cols = 7
        self._jac_spa = np.zeros((self._num_rows, self._num_cols))
        self._jac_bod = np.zeros((self._num_rows, self._num_cols))

        self._new_data = 0

        self._rate = rospy.Rate(500)
        self._joint_state = JointState()
        self._cart_state = JointState()
        self._state_cmd = JointState()

        self._wrench_cmd = np.zeros((6, 1))
        self._joint_torques_cmd = np.zeros((7, 1))

        self._jac_spa_sub = rospy.Subscriber('/dvrk/' + arm_name +
                                             '/jacobian_spatial',
                                             Float64MultiArray,
                                             self.jac_spa_cb,
                                             queue_size=1)
        self._jac_bod_sub = rospy.Subscriber('/dvrk/' + arm_name +
                                             '/jacobian_body',
                                             Float64MultiArray,
                                             self.jac_bod_cb,
                                             queue_size=1)
        self._js_sub = rospy.Subscriber('/dvrk/' + arm_name +
                                        '/state_joint_current',
                                        JointState,
                                        self.js_cb,
                                        queue_size=1)
        self._cs_sub = rospy.Subscriber('/dvrk/' + arm_name +
                                        '/position_cartesian_current',
                                        PoseStamped,
                                        self.cs_cb,
                                        queue_size=1)
        self._wrench_bod_sub = rospy.Subscriber('/ambf/' + arm_name +
                                                '/set_wrench_body',
                                                Wrench,
                                                self.wrench_bod_cb,
                                                queue_size=1)

        self._torque_pub = rospy.Publisher('/dvrk/' + arm_name +
                                           '/set_effort_joint',
                                           JointState,
                                           queue_size=5)

        self._wd = WatchDog(0.1)

        self.l4_o = np.identity(3)

        self.l5_o = np.mat([[0, 0, -1], [0, 1, 0], [1, 0, 0]])

        self.l6_o = np.mat([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])

        self.l7_o = np.mat([[1, 0, 0], [0, 0, -1], [0, 1, 0]])

        self.Kp = 0.3
        self.Kd = 0

        theta = 0
        self._rot_offset = np.mat([[+1.0, +0.0, +0.0],
                                   [+0.0, +np.cos(theta), +np.sin(theta)],
                                   [+0.0, -np.sin(theta), +np.cos(theta)]])

        self._ee_rot = Rotation()
Example #46
import sys
from numpy import mat, mean, power


# Mapper for distributed mean and variance computation
def read_input(file):
    for line in file:
        yield line.rstrip()


input = read_input(sys.stdin)
input = [float(line) for line in input]
numInputs = len(input)
input = mat(input)
sqInput = power(input, 2)
print("%d\t%f\t%f" % (numInputs, mean(input), mean(sqInput)))
print("report: still alive", file=sys.stderr)
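
The matching reducer is not shown in this listing; a sketch that pools the per-mapper triples (count, mean, mean of squares) into a global mean and variance via var = E[x^2] - E[x]^2 (the layout mirrors the mapper; the combining code itself is an assumption):

import sys

def read_input(file):
    for line in file:
        yield line.rstrip()

mapperOut = [line.split('\t') for line in read_input(sys.stdin)]
cumN = cumVal = cumSumSq = 0.0
for instance in mapperOut:
    nj = float(instance[0])
    cumN += nj
    cumVal += nj * float(instance[1])    # nj * mean_j   -> running sum of values
    cumSumSq += nj * float(instance[2])  # nj * E[x^2]_j -> running sum of squares
globalMean = cumVal / cumN
globalVar = cumSumSq / cumN - globalMean ** 2  # E[x^2] - E[x]^2
print("%d\t%f\t%f" % (cumN, globalMean, globalVar))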
Example #47
    def sampleUniqueFeatures(self):
        #pdb.set_trace()
        #For each video
        (N, K) = self.F.shape
        # Most of the statistics are already collected
        for sq in range(N):
            (N, K) = self.F.shape
            preComputedEta = {}
            featCount = np.sum(self.F, axis=0)
            vidCount = np.sum(self.F, axis=1)
            uniqueL = []
            for kk in range(K):
                if featCount[0, kk] == 1 and self.F[sq, kk] == 1:
                    uniqueL.append(kk)
            #First compute the old probability anyway
            (i, j) = self.F[sq, :].nonzero()
            j = np.asarray(j)[0]
            TranNew = self.eta[sq][j, :][:, j]
            StateNew = {}
            StateNew['v'] = self.states['v'][0][j, :]
            StateNew['l'] = self.states['l'][0][j, :]

            preComputedEta[sq] = np.mat(
                np.random.gamma(
                    np.ones((K, K)) * self.lamb + np.eye(K) * self.kappa))
            for x in j:
                for y in j:
                    preComputedEta[sq][x, y] = self.eta[sq][x, y]

            oldProb = self.getFastHMMLikelihood(
                StateNew, TranNew, sq,
                self.obsPreComputedLikelihoods[sq][j, :])

            if (len(uniqueL) == 0 or np.random.rand() > 0.5):
                #Birth
                #First choose data driven window length
                vid = self.videos[sq]
                if self.minW < len(vid):
                    leng = int(
                        np.random.uniform(self.minW,
                                          min(self.maxW,
                                              len(vid) - 1)))
                else:
                    leng = int(
                        np.random.uniform(1, min(self.maxW,
                                                 len(vid) - 1)))
                startP = int(np.random.uniform(0, len(vid) - leng - 1))

                F = np.concatenate((self.F, np.zeros((self.F.shape[0], 1))),
                                   axis=1)
                F[sq, self.F.shape[1]] = 1

                preComputedEta[sq] = np.mat(
                    np.random.gamma(
                        np.ones((K + 1, K + 1)) * self.lamb +
                        np.eye(K + 1) * self.kappa))
                (i, j) = F[sq, :].nonzero()
                j = np.asarray(j)[0]
                for x in j[:-1]:
                    for y in j[:-1]:
                        preComputedEta[sq][x, y] = self.eta[sq][x, y]

                TranNew = preComputedEta[sq][j, :][:, j]
                #PopulateNew Row
                self.obsPreComputedLikelihoods[sq] = np.concatenate(
                    (self.obsPreComputedLikelihoods[sq],
                     np.zeros((1, len(self.videos[sq])))),
                    axis=0)

                #print self.states
                ThetaNew = self.mlTheta(sq, startP, startP + leng + 1, F,
                                        self.F.shape[1])
                for x in range(self.F.shape[1]):
                    ThetaNew['v'][0][x, :] = self.states['v'][0][x, :]
                    ThetaNew['l'][0][x, :] = self.states['l'][0][x, :]
                #print ThetaNew,self.obsPreComputedLikelihoods[sq].shape

                for t in range(len(self.videos[sq])):
                    kk = K
                    self.obsPreComputedLikelihoods[sq][kk, t] = np.sum(
                        np.log([
                            ThetaNew['v'][0][kk, i] *
                            self.videos[sq][t]['obsV'][i] +
                            (1 - ThetaNew['v'][0][kk, i]) *
                            (1 - self.videos[sq][t]['obsV'][i])
                            for i in range(self.numVidObjs)
                        ]))
                    self.obsPreComputedLikelihoods[sq][kk, t] += np.sum(
                        np.log([
                            ThetaNew['l'][0][kk, i] *
                            self.videos[sq][t]['obsL'][i] +
                            (1 - ThetaNew['l'][0][kk, i]) *
                            (1 - self.videos[sq][t]['obsL'][i])
                            for i in range(self.numLangObjs)
                        ]))

                newProb = self.getFastHMMLikelihood(
                    ThetaNew, TranNew, sq,
                    self.obsPreComputedLikelihoods[sq][j, :])

                etaa = self.gamma * 1.0 / (1.0 + N - 1.0)
                logPrNumFeat_Diff = np.log(etaa) + sp.special.gammaln(
                    F.shape[1] - 1) - sp.special.gammaln(F.shape[1])

                #pdb.set_trace()
                if np.exp(newProb - oldProb + logPrNumFeat_Diff - np.log(0.5) +
                          np.log(0.5 /
                                 (len(uniqueL) + 1.0))) > np.random.rand():
                    self.F = F

                    self.states['v'][0] = np.concatenate(
                        (self.states['v'][0], ThetaNew['v'][0][-1, :]), axis=0)
                    self.states['l'][0] = np.concatenate(
                        (self.states['l'][0], ThetaNew['l'][0][-1, :]), axis=0)

                    self.eta[sq] = np.concatenate(
                        (self.eta[sq], preComputedEta[sq][:-1, -1]), axis=1)
                    self.eta[sq] = np.concatenate(
                        (self.eta[sq], preComputedEta[sq][-1, :]), axis=0)

                    #Update the next ones
                    L1 = range(len(self.videos))
                    L1.remove(sq)
                    for vi in L1:
                        self.eta[vi] = np.concatenate(
                            (self.eta[vi], np.zeros(
                                (self.eta[vi].shape[0], 1))),
                            axis=1)
                        self.eta[vi] = np.concatenate(
                            (self.eta[vi], np.zeros(
                                (1, self.eta[vi].shape[1]))),
                            axis=0)
                        self.obsPreComputedLikelihoods[vi] = np.concatenate(
                            (self.obsPreComputedLikelihoods[vi],
                             np.zeros((1, len(self.videos[vi])))),
                            axis=0)

                        for t in range(len(self.videos[vi])):
                            kk = K
                            self.obsPreComputedLikelihoods[vi][kk, t] = np.sum(
                                np.log([
                                    self.states['v'][0][kk, i] *
                                    self.videos[vi][t]['obsV'][i] +
                                    (1 - self.states['v'][0][kk, i]) *
                                    (1 - self.videos[vi][t]['obsV'][i])
                                    for i in range(self.numVidObjs)
                                ]))
                            self.obsPreComputedLikelihoods[vi][
                                kk, t] += np.sum(
                                    np.log([
                                        self.states['l'][0][kk, i] *
                                        self.videos[vi][t]['obsL'][i] +
                                        (1 - self.states['l'][0][kk, i]) *
                                        (1 - self.videos[vi][t]['obsL'][i])
                                        for i in range(self.numLangObjs)
                                    ]))
                else:
                    self.obsPreComputedLikelihoods[sq] = np.delete(
                        self.obsPreComputedLikelihoods[sq], K, 0)
                    #print 'SPB',self.F.shape[1],self.states
                    #update eta,theta
            elif (vidCount[sq, 0] > 1):
                #Death
                feat2kill = np.random.choice(uniqueL)

                self.F[sq, feat2kill] = 0

                (i, j) = self.F[sq, :].nonzero()
                j = np.asarray(j)[0]

                preComputedEta[sq] = np.mat(
                    np.random.gamma(
                        np.ones((K, K)) * self.lamb + np.eye(K) * self.kappa))
                preComputedEta[sq][j, :][:, j] = self.eta[sq][j, :][:, j]
                TranNew = preComputedEta[sq][j, :][:, j]

                StateNew = {}
                StateNew['v'] = [self.states['v'][0][j, :]]
                StateNew['l'] = [self.states['l'][0][j, :]]

                newProb = self.getFastHMMLikelihood(
                    StateNew, TranNew, sq,
                    self.obsPreComputedLikelihoods[sq][j, :])

                etaa = self.gamma * 1.0 / (1.0 + N - 1.0)
                logPrNumFeat_Diff = -np.log(etaa) - sp.special.gammaln(
                    self.F.shape[1] - 1) + sp.special.gammaln(self.F.shape[1])
                if np.exp(newProb - oldProb + logPrNumFeat_Diff -
                          np.log(0.5 / len(uniqueL)) +
                          np.log(0.5)) > np.random.rand():
                    #accept
                    newF = self.F[:,
                                  range(0, feat2kill) +
                                  range(feat2kill + 1, self.F.shape[1])]
                    self.F = newF

                    self.states['v'][0] = np.delete(self.states['v'][0],
                                                    feat2kill, 0)
                    self.states['l'][0] = np.delete(self.states['l'][0],
                                                    feat2kill, 0)

                    #Update the next ones
                    L1 = range(len(self.videos))
                    #L1.remove(sq)
                    for vi in L1:
                        for xx in range(feat2kill + 1, self.eta[vi].shape[1]):
                            for yy in range(feat2kill + 1,
                                            self.eta[vi].shape[1]):
                                self.eta[vi][xx - 1, yy - 1] = self.eta[vi][xx,
                                                                            yy]

                        dim2kill = self.eta[vi].shape[1] - 1
                        self.eta[vi] = np.delete(self.eta[vi], dim2kill, 1)
                        self.eta[vi] = np.delete(self.eta[vi], dim2kill, 0)
                        self.obsPreComputedLikelihoods[vi] = np.delete(
                            self.obsPreComputedLikelihoods[vi], feat2kill, 0)
                else:
                    self.F[sq, feat2kill] = 1
        return 0
Example #48
    def get_detax(self, x):
        # Offset from the reference pixel uvcen to the point x,
        # returned as a nested list [[dx, dy]].
        uvcen = [331, 229]
        kk = numpy.mat(uvcen).T - numpy.mat(x).T
        return kk.reshape((1, 2)).tolist()
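Example #49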
def initialize_v(n, k):
    # Initialize the n x k latent-factor matrix V with small Gaussian noise.
    # Assumes: import numpy as np; from random import normalvariate
    v = np.mat(np.zeros((n, k)))
    for i in range(n):
        for j in range(k):
            v[i, j] = normalvariate(0, 0.2)
    return v
Example #50
    def sampleSharedFeats(self):
        #pdb.set_trace()
        # We only sample columns supported by more than one video; features
        # unique to a specific video are not sampled here.
        # First compute the number of occurrences of each feature
        featCount = np.sum(self.F, axis=0)
        vidCount = np.sum(self.F, axis=1)
        (N, K) = self.F.shape

        #For computational efficiency we first cache all likelihoods and eta's
        self.obsPreComputedLikelihoods = {}
        preComputedEta = {}
        for sq in range(N):
            #Cache likelihoods
            self.obsPreComputedLikelihoods[sq] = np.zeros(
                (K, len(self.videos[sq])))
            for t in range(len(self.videos[sq])):
                for kk in range(K):
                    self.obsPreComputedLikelihoods[sq][kk, t] = np.sum(
                        np.log([
                            self.states['v'][0][kk, i] *
                            self.videos[sq][t]['obsV'][i] +
                            (1 - self.states['v'][0][kk, i]) *
                            (1 - self.videos[sq][t]['obsV'][i])
                            for i in range(self.numVidObjs)
                        ]))
                    self.obsPreComputedLikelihoods[sq][kk, t] += np.sum(
                        np.log([
                            self.states['l'][0][kk, i] *
                            self.videos[sq][t]['obsL'][i] +
                            (1 - self.states['l'][0][kk, i]) *
                            (1 - self.videos[sq][t]['obsL'][i])
                            for i in range(self.numLangObjs)
                        ]))
            #Cache ETAs
            preComputedEta[sq] = np.mat(
                np.random.gamma(
                    np.ones((K, K)) * self.lamb + np.eye(K) * self.kappa))
            (i, j) = self.F[sq, :].nonzero()
            j = np.asarray(j)[0]
            for x in j:
                for y in j:
                    preComputedEta[sq][x, y] = self.eta[sq][x, y]
        #pdb.set_trace()
        for sq in range(N):
            PrObsCur = 1
            for ft in range(K):
                if (vidCount[sq, 0] == 1) and (self.F[sq, ft] == 1):
                    continue
                if (featCount[0, ft] == 1 and self.F[sq, ft] == 1):
                    continue

                if PrObsCur == 1:
                    (i, j) = self.F[sq, :].nonzero()
                    j = np.asarray(j)[0]
                    TranNew = preComputedEta[sq][j, :][:, j]
                    StateNew = {}
                    StateNew['v'] = self.states['v'][0][j, :]
                    StateNew['l'] = self.states['l'][0][j, :]
                    PrObsCur = self.getFastHMMLikelihood(
                        StateNew, TranNew, sq,
                        self.obsPreComputedLikelihoods[sq][j, :])

                #First decrement the feat count
                featCount[0, ft] = featCount[0, ft] - self.F[sq, ft]
                vidCount[sq, 0] = vidCount[sq, 0] - self.F[sq, ft]

                if self.F[sq, ft] == 1:
                    PrPRatio = (N - featCount[0, ft]) / float(featCount[0, ft])
                else:
                    PrPRatio = (featCount[0, ft]) / float(N - featCount[0, ft])
                # Here we sample F_ik using Eq. 15 from Emily B. Fox et al.,
                # "Joint Modeling of Multiple Time Series via the Beta Process
                # with Application to Motion Capture Segmentation"

                self.F[sq, ft] = 1 - self.F[sq, ft]
                (i, j) = self.F[sq, :].nonzero()
                j = np.asarray(j)[0]
                TranNew = preComputedEta[sq][j, :][:, j]
                StateNew = {}
                StateNew['v'] = self.states['v'][0][j, :]
                StateNew['l'] = self.states['l'][0][j, :]
                PrObsNew = self.getFastHMMLikelihood(
                    StateNew, TranNew, sq,
                    self.obsPreComputedLikelihoods[sq][j, :])

                if np.exp(PrObsNew - PrObsCur) * PrPRatio > np.random.rand():
                    PrObsCur = PrObsNew
                    if self.F[sq, ft] == 1:
                        for x in j:
                            self.eta[sq][x, ft] = preComputedEta[sq][x, ft]
                            self.eta[sq][ft, x] = preComputedEta[sq][ft, x]
                    else:
                        for x in j:
                            self.eta[sq][x, ft] = 0
                            self.eta[sq][ft, x] = 0
                        self.eta[sq][ft, ft] = 0
                else:
                    self.F[sq, ft] = 1 - self.F[sq, ft]

                featCount[0, ft] = featCount[0, ft] + self.F[sq, ft]
                vidCount[sq, 0] = vidCount[sq, 0] + self.F[sq, ft]
        return 0
Example #51
def rot_z(q):
    # Rotation matrix about the z-axis by angle q (radians).
    r = np.mat([[np.cos(q), -np.sin(q), 0],
                [np.sin(q), np.cos(q), 0],
                [0, 0, 1]])
    return r
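
A quick sanity check of the sign convention (hypothetical usage; assumes numpy is imported as np):

# A +90-degree rotation about z maps the x-axis onto the y-axis.
print(rot_z(np.pi / 2) * np.mat([[1], [0], [0]]))  # ~ [[0], [1], [0]]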
Example #52
        image, label_pts = train_dataset.next_batch(1)
        label_pts = label_pts[0]
        image = [
            cv2.resize(tmp, (128, 64), interpolation=cv2.INTER_LINEAR)
            for tmp in image
        ]
        c_val = sess.run(coffe,
                         feed_dict={
                             tensor_in: image,
                             gt_label_pts: label_pts
                         })
        R = np.zeros([3, 3], np.float32)
        R[0, 0] = c_val[0]
        R[0, 1] = c_val[1]
        R[0, 2] = c_val[2]
        R[1, 1] = c_val[3]
        R[1, 2] = c_val[4]
        R[2, 1] = c_val[5]
        R[2, 2] = 1
        print(np.mat(R).I)
        print(R)
        print(c_val)

        warp_image = cv2.warpPerspective(image[0],
                                         R,
                                         dsize=(image[0].shape[1],
                                                image[0].shape[0]))
        cv2.imwrite("src.jpg", image[0])
        cv2.imwrite("ret.jpg", warp_image)
Example #53
import numpy as np
from numpy import linalg as la
'''
C=np.array([1.75390 ,-0.51310, -0.00060])
Pt=np.array([-0.33130,  0.01060 , 0.00000])
v=C-Pt
v=v/np.sqrt(np.sum(v**2))
'''
### Script that generates the rotation matrix aligning a with b (a moves onto b)
### via Rodrigues' formula: R = I + [v]x + [v]x^2 * (1 - cos)/sin^2, with v = a x b
a = np.mat([1, 1, 1])
b = np.mat([0, 0, 1])
a = a / np.sqrt(np.dot(a, a.T))
c = np.dot(a, b.T)
v = np.cross(a, b)[0]
s = np.sqrt(np.dot(v, v.T))
y = np.mat([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])

Rot = np.mat(np.diag([1, 1, 1])) + y + np.dot(y, y) * (((1 - c) / s**2)[0, 0])

print(Rot, la.det(Rot))
print(np.dot(Rot, np.mat([1, 0, 0]).T), np.dot(Rot, np.mat([0, 1, 0]).T))
print(la.inv(Rot))
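Example #54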
def getAccuracy(predict, classLabels):
    # Reconstructed signature (taken from the call in the __main__ block below);
    # returns the misclassification rate for labels in {1.0, -1.0}.
    m = np.shape(predict)[0]
    allItem = 0
    error = 0
    for i in range(m):
        allItem += 1
        if float(predict[i]) < 0.5 and classLabels[i] == 1.0:
            error += 1
        elif float(predict[i]) >= 0.5 and classLabels[i] == -1.0:
            error += 1
        else:
            continue
    return float(error) / allItem


if __name__ == "__main__":
    print("----1.load data---")
    dataTrain, labelTrain = loadDataSet("FM.txt")
    print("----2.learning----")
    w0, w, v = stocGradAscent(np.mat(dataTrain), labelTrain, 3, 5000, 0.01)
    predict_result = getPrediction(np.mat(dataTrain), w0, w, v)
    # getAccuracy returns the misclassification rate, so 1 - rate is the accuracy
    print("----training accuracy:----", (1 - getAccuracy(predict_result, labelTrain)))
    print("----3.save result----")
    #save_model("weights",w0,w,v)
Example #55
    import numpy as np
    from math import factorial

    try:
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # precompute coefficients
    b = np.mat([[k**i for i in order_range]
                for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
    # pad the signal at the extremes with
    # values taken from the signal itself
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')


def read_options():
    desc = """Questions and suggestions: [email protected]"""
    global options
    opt = optparse.OptionParser(
        usage='Usage: %prog --ms=somename.MS <options>',
        version='%prog version 1.0',
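Example #56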
                b = b + eta * labelMat[i]
#                w = np.multiply(labelMat,alpha).T*dataMat
                print (i,alpha[i])
                flag = True
                break
            else:
                flag = False
#    w = (np.multiply(labelMat,alpha).T*dataMat).T
    return alpha

    

if __name__ == '__main__':
    trainData, label, x_0, y_0, x_1, y_1, x_2, y_2 = generateData(1200)
    m = np.shape(trainData)[0]
    K = np.mat(np.zeros((m, m)))

    # kernel matrix
    for i in range(m):
        K[:, i] = kernelTrans(np.mat(trainData), np.mat(trainData[i, :]), ('rbf', 1))

    alpha = trainModel(trainData, label, 1, K)

    plt.subplots_adjust(right=0.825)
    ax = plt.gca()
    plt.scatter(x_2, y_2, alpha=1, label='separated')
    plt.scatter(x_0, y_0, alpha=0.6, label='Positive', color='green')
    plt.scatter(x_1, y_1, alpha=0.6, label='Negative', color='red')  # alpha 0.6 keeps the colors light and easy to read
Example #57
    def get_N_of_line(self, L1):
        # Linear shape functions along an element edge: Ni + Nj = 1, Nk = 0.
        Ni = L1
        Nj = 1 - L1
        Nk = 0
        N_of_line = np.mat([Ni, Nj, Nk]).T
        return N_of_line
Example #58
    def init_grid(self):
        'Auto-detect the three kinds of data files in the folder and return them as Pandas feature data'
        filename_point = './' + self.filename + '/E.n'
        filename_element = './' + self.filename + '/E.e'
        filename_boundary = './' + self.filename + '/E.s'

        # Read the node information file
        number_of_triangle_point = pd.read_csv(filename_point,
                                               header=None,
                                               delim_whitespace=True,
                                               nrows=1).iat[0, 0]
        list_of_no_read = [
            0, number_of_triangle_point + 1, number_of_triangle_point + 2
        ]
        # The first row and the last two rows need not be read
        # For pd.read_csv parameters, see http://www.cnblogs.com/datablog/p/6127000.html
        self.point_of_global = pd.read_csv(
            filename_point,
            delim_whitespace=True,
            names=["point NO", "x", "y", "boundary mark"],
            skiprows=list_of_no_read)

        # Read the element information file
        number_of_triangle_element = pd.read_csv(filename_element,
                                                 header=None,
                                                 delim_whitespace=True,
                                                 nrows=1).iat[0, 0]
        list_of_no_read = [
            0, number_of_triangle_element + 1, number_of_triangle_element + 2
        ]
        self.element_of_global = pd.read_csv(
            filename_element,
            delim_whitespace=True,
            names=[
                "element number", "point i", "point j", "point k",
                "neighbor ele i", "neighbor ele j", "neighbor ele k",
                "boundary i", "boundary j", "boundary k"
            ],
            skiprows=list_of_no_read)

        # Read the boundary information file
        number_of_triangle_boundary = pd.read_csv(filename_boundary,
                                                  header=None,
                                                  delim_whitespace=True,
                                                  nrows=1).iat[0, 0]
        list_of_no_read = [
            0, number_of_triangle_boundary + 1, number_of_triangle_boundary + 2
        ]
        self.boundary_of_global = pd.read_csv(filename_boundary,
                                              delim_whitespace=True,
                                              names=[
                                                  "boundary start",
                                                  "boundary end",
                                                  "left element",
                                                  "right element", "mark"
                                              ],
                                              skiprows=list_of_no_read)

        # Initialize the global stiffness matrix and the global load vector
        self.Ae = np.mat(
            np.tile([0.],
                    (number_of_triangle_point, number_of_triangle_point)))
        self.f = np.mat(np.tile([0.], (number_of_triangle_point, 1)))
Example #59
# Summary of the new category
concreteList['Strenght Catagory'].value_counts()

# Extract class names to python list,
# then encode with integers (dict)
classLabels = concreteList['Strenght Catagory']
classNames = sorted(set(classLabels))
classDict = dict(zip(classNames, range(5)))

# Compute values of N and M.
N = concreteList.shape[0]
M = concreteList.shape[1]
C = len(classNames)

# Extract vector y, convert to np matrix and transpose
y = np.mat([classDict[value] for value in classLabels]).T
X = np.array(concreteList.iloc[:, 0:-2])

# Data attributes to be plotted
i = 1
j = 6

# Create a figure and give it a title.
f = figure()
title('NanoNose data')

for c in range(C):
    # select indices belonging to class c:
    class_mask = y.A.ravel() == c
    plot(X[class_mask, i], X[class_mask, j], 'o')
legend(classNames)
Example #60
    def get_area(self):
        # Area of the triangle element: half the determinant of the matrix of
        # vertex coordinates (the sign encodes the vertex orientation).
        area_of_element = np.mat([[self.xi, self.yi, 1],
                                  [self.xj, self.yj, 1],
                                  [self.xk, self.yk, 1]])
        self.area_of_element = np.linalg.det(area_of_element) / 2