Example #1
def align_magnetism(m, vectors):
    """Rotate a matrix so that its components are aligned with the
    direction of the magnetism."""
    import numpy as np
    from scipy.sparse import csc_matrix, bmat
    from scipy.sparse.linalg import expm  # matrix exponential (sparse version)

    if len(m) != 2 * len(vectors):  # stop if the dimensions are incompatible
        raise ValueError("Matrix dimension must be twice the number of vectors")
    # Pauli matrices
    sx = csc_matrix([[0.0, 1.0], [1.0, 0.0]])
    sy = csc_matrix([[0.0, -1j], [1j, 0.0]])
    sz = csc_matrix([[1.0, 0.0], [0.0, -1.0]])
    n = len(m) // 2  # number of sites
    R = [[None for i in range(n)] for j in range(n)]  # rotation matrix
    for i, v in enumerate(vectors):  # loop over sites
        vv = np.sqrt(v.dot(v))  # norm of v
        if vv > 0.000001:  # if nonzero, normalize
            u = v / vv
        else:  # if zero, put to zero
            u = np.array([0.0, 0.0, 0.0])
        #    rot = u[0]*sx + u[1]*sy + u[2]*sz
        uxy = np.sqrt(u[0] ** 2 + u[1] ** 2)  # component in the xy plane
        phi = np.arctan2(u[1], u[0])
        theta = np.arctan2(uxy, u[2])
        r1 = phi * sz / 2.0  # rotate along z
        r2 = theta * sy / 2.0  # rotate along y
        # a factor 2 is taken out due to the 1/2 of S
        rot = expm(1j * r2) * expm(1j * r1)
        R[i][i] = rot  # save term
    R = bmat(R)  # convert to a full sparse matrix
    mout = R * csc_matrix(m) * R.H  # rotate matrix
    return mout.todense()  # return dense matrix
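A minimal usage sketch (not from the original project), assuming the function above is defined: the 4x4 Zeeman-like matrix and the magnetization vectors are made-up test data for two sites with moments along +x.

import numpy as np

sx = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=complex)
m = np.kron(np.eye(2), sx)                  # sx acting on each of two sites
vectors = [np.array([1.0, 0.0, 0.0])] * 2   # local moments along +x
m_rot = align_magnetism(m, vectors)         # rotated so moments point along +z
print(np.round(m_rot, 6))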
Example #2
def BW_CBS_sp(x, edge, m):
    """Assemble the cubic B-spline basis matrix and the first/second
    derivative weight matrices. Basis_CBS_i_sp, d1_Basis_CBS_i_sp and
    d2_Basis_CBS_i_sp are assumed to be defined elsewhere in the module,
    each returning (row indices, values) for one basis function."""
    import numpy as np
    import scipy.sparse as spm

    x0 = edge[0]
    width = (edge[1] - edge[0]) / (m - 3)

    BasisMat_d, BasisMat_r, BasisMat_c = [], [], []
    WeightMat1_d, WeightMat1_r, WeightMat1_c = [], [], []
    WeightMat2_d, WeightMat2_r, WeightMat2_c = [], [], []

    for i in np.arange(-3, m - 3):
        index, data = Basis_CBS_i_sp(x, x0 + i * width, width)
        BasisMat_d.append(data)
        BasisMat_c.append((i + 3) * np.ones(len(data), dtype=np.int64))
        BasisMat_r.append(index)

        index, data = d1_Basis_CBS_i_sp(x, x0 + i * width, width)
        WeightMat1_d.append(data)
        WeightMat1_c.append((i + 3) * np.ones(len(data), dtype=np.int64))
        WeightMat1_r.append(index)

        index, data = d2_Basis_CBS_i_sp(x, x0 + i * width, width)
        WeightMat2_d.append(data)
        WeightMat2_c.append((i + 3) * np.ones(len(data), dtype=np.int64))
        WeightMat2_r.append(index)

    BasisMat = spm.csc_matrix((np.hstack(BasisMat_d), (np.hstack(BasisMat_r), np.hstack(BasisMat_c))), shape=(len(x), m))
    d1_BasisMat = spm.csc_matrix((np.hstack(WeightMat1_d), (np.hstack(WeightMat1_r), np.hstack(WeightMat1_c))), shape=(len(x), m))
    d2_BasisMat = spm.csc_matrix((np.hstack(WeightMat2_d), (np.hstack(WeightMat2_r), np.hstack(WeightMat2_c))), shape=(len(x), m))

    WeightMat1 = d1_BasisMat.T.dot(d1_BasisMat)
    WeightMat2 = d2_BasisMat.T.dot(d2_BasisMat)

    return [BasisMat, WeightMat1, WeightMat2]
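BW_CBS_sp builds each matrix with SciPy's COO-style constructor csc_matrix((data, (row, col)), shape=...). A self-contained sketch of that pattern with made-up values:

import numpy as np
import scipy.sparse as spm

rows = np.array([0, 1, 2, 2])
cols = np.array([0, 0, 1, 2])
data = np.array([1.0, 2.0, 3.0, 4.0])
M = spm.csc_matrix((data, (rows, cols)), shape=(3, 3))
print(M.toarray())
# [[1. 0. 0.]
#  [2. 0. 0.]
#  [0. 3. 4.]]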
Example #3
File: camera.py Project: cadik/opendr
    def compute_dr_wrt(self, wrt):

        if wrt not in (self.v, self.rt, self.t):
            return
        
        if wrt is self.t:
            if not hasattr(self, '_drt') or self._drt.shape[0] != self.v.r.size:                
                IS = np.arange(self.v.r.size)
                JS = IS % 3
                data = np.ones(len(IS))
                self._drt = sp.csc_matrix((data, (IS, JS)))
            return self._drt
        
        if wrt is self.rt:
            rot, rot_dr = cv2.Rodrigues(self.rt.r)
            rot_dr = rot_dr.reshape((3,3,3))
            dr = np.einsum('abc, zc -> zba', rot_dr, self.v.r).reshape((-1,3))
            return dr
        
        if wrt is self.v:
            rot = cv2.Rodrigues(self.rt.r)[0]
            
            IS = np.repeat(np.arange(self.v.r.size), 3)
            JS = np.repeat(np.arange(self.v.r.size).reshape((-1,3)), 3, axis=0)
            data = np.vstack([rot for i in range(self.v.r.size // 3)])
            result = sp.csc_matrix((data.ravel(), (IS.ravel(), JS.ravel())))
            return result
Example #4
	def process(self,Y,X=None,Xt=None,tests=None):
		self.setYX(Y,X,Xt)
		bivariter = 0
		sumSSE = 0
		esiter = list()
		es.state()["iterations"] = esiter
		# in the first iteration we calculate W by using ones on U
		U = ssp.csc_matrix(ones(self.u.shape))
		while True:
			esiterdict = dict()
			esiterdict["i"] = bivariter
			logger.debug("Starting iteration: %d"%bivariter)
			bivariter += 1
			W,w_bias,err = self.calculateW(U,tests=tests)
			esiterdict["w"] = W
			esiterdict["w_sparcity"] = (abs(W) > 0).sum()
			esiterdict["w_bias"] = w_bias
			esiterdict["w_test_err"] = err
			if "test" in err: logger.debug("W sparcity=%d,test_total_err=%2.2f,test_err=%s"%(esiterdict["w_sparcity"],err['test']["totalsse"],str(err['test']["diffsse"])))
			W = ssp.csc_matrix(W)
			U,u_bias,err = self.calculateU(W,tests=tests)
			esiterdict["u"] = U
			esiterdict["u_sparcity"] = (abs(U) > 0).sum()
			esiterdict["u_bias"] = u_bias
			esiterdict["u_test_err"] = err
			if "test" in err: logger.debug("U sparcity=%d,test_total_err=%2.2f,test_err=%s"%(esiterdict["u_sparcity"],err['test']["totalsse"],str(err['test']["diffsse"])))
			U = ssp.csc_matrix(U)
			self.u = U
			self.w = W
			self.w_bias = w_bias
			self.u_bias = u_bias
			esiter += [esiterdict]
			if self.allParams['bivar_max_it'] <= bivariter:
				break
		return sumSSE
Example #5
    def test_scipy_sparse(self):
        """Test scipy sparse matrices."""
        # Constants.
        A = numpy.matrix( numpy.arange(8).reshape((4,2)) )
        A = sp.csc_matrix(A)
        A = sp.eye(2).tocsc()
        key = (slice(0, 1, None), slice(None, None, None))
        Aidx = intf.index(A, (slice(0, 2, None), slice(None, None, None)))
        Aidx = intf.index(Aidx, key)
        self.assertEqual(Aidx.shape, (1, 2))
        self.assertEqual(Aidx[0,0], 1)
        self.assertEqual(Aidx[0,1], 0)

        # Linear ops.
        var = Variable(4, 2)
        A = numpy.matrix( numpy.arange(8).reshape((4,2)) )
        A = sp.csc_matrix(A)
        B = sp.hstack([A, A])
        self.assertExpression(var + A, (4, 2))
        self.assertExpression(A + var, (4, 2))
        self.assertExpression(B * var, (4, 2))
        self.assertExpression(var - A, (4, 2))
        self.assertExpression(A - A - var, (4, 2))
        if PY35:
            self.assertExpression(var.__rmatmul__(B), (4,2))
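The indexing part of the test above exercises slicing of a sparse identity. The same behaviour can be checked on plain SciPy objects (a quick stand-alone sketch, independent of the cvxpy helpers):

import scipy.sparse as sp

A = sp.eye(2).tocsc()
row = A[0:1, :]                 # slicing keeps a 2-D sparse shape
print(row.shape)                # (1, 2)
print(row[0, 0], row[0, 1])     # 1.0 0.0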
Example #6
	def optimise_lambda(self, lambda_w, lambda_u, Yparts, Xparts, w_lambda=None, u_lambda=None):
		logger.debug("... expanding Yparts")
		Yparts = Yparts.apply(BatchBivariateLearner._expandY)

		ls = LambdaSearch(self.part_eval)
		ntasks = Yparts.train_all.shape[1]
		ndays = Yparts.train_all.shape[0]/ntasks
		nusers = Xparts.train_all.shape[1]/ndays

		u = ssp.csc_matrix(ones((nusers,ntasks)))
		logger.debug("... Preparing VPrime")
		Vprime_parts = Xparts.apply(
			BatchBivariateLearner._calculateVprime,u
		)
		if w_lambda is None:
			logger.debug("... Optimising lambda for w")
			ls.optimise(self.w_func,lambda_w,Vprime_parts,Yparts,name="w")
		else:
			logger.debug("... Setting hardcoded w: %2.2f"%w_lambda)
			self.w_func.params['lambda1'] = w_lambda

		logger.debug("... Calculating w with optimal lambda")
		w,bias = self.w_func.call(Vprime_parts.train_all,Yparts.train_all)
		w = ssp.csc_matrix(w)
		logger.debug("... Preparing Dprime")
		Dprime_parts = Xparts.apply(
			BatchBivariateLearner._calculateDprime,w,u.shape
		)
		if u_lambda is None:
			logger.debug("... Optimising lambda for u")
			ls.optimise(self.u_func, lambda_u, Dprime_parts, Yparts,name="u")
		else:
			logger.debug("... Setting hardcoded w: %2.2f"%u_lambda)
			self.u_func.params['lambda1'] = u_lambda
		return [(u,self.u_func.params['lambda1']),(w,self.w_func.params['lambda1'])]
Example #7
    def _assemble(self, mu=None):
        g = self.grid
        bi = self.boundary_info

        if g.dim > 2:
            raise NotImplementedError

        if bi is None or not bi.has_robin or self.robin_data is None:
            return coo_matrix((g.size(g.dim), g.size(g.dim))).tocsc()

        RI = bi.robin_boundaries(1)
        if g.dim == 1:
            robin_c = self.robin_data[0](g.centers(1)[RI], mu=mu)
            I = coo_matrix((robin_c, (RI, RI)), shape=(g.size(g.dim), g.size(g.dim)))
            return csc_matrix(I).copy()
        else:
            xref = g.quadrature_points(1, order=self.order)[RI]
            # xref(robin-index, quadraturepoint-index)
            if self.robin_data[0].shape_range == ():
                robin_c = self.robin_data[0](xref, mu=mu)
            else:
                robin_elements = g.superentities(1, 0)[RI, 0]
                robin_indices = g.superentity_indices(1, 0)[RI, 0]
                normals = g.unit_outer_normals()[robin_elements, robin_indices]
                robin_values = self.robin_data[0](xref, mu=mu)
                robin_c = np.einsum('ei,eqi->eq', normals, robin_values)

            # robin_c(robin-index, quadraturepoint-index)
            q, w = line.quadrature(order=self.order)
            SF = np.squeeze(np.array([1 - q, q]))
            SF_INTS = np.einsum('ep,pi,pj,e,p->eij', robin_c, SF, SF, g.integration_elements(1)[RI], w).ravel()
            SF_I0 = np.repeat(g.subentities(1, g.dim)[RI], 2).ravel()
            SF_I1 = np.tile(g.subentities(1, g.dim)[RI], [1, 2]).ravel()
            I = coo_matrix((SF_INTS, (SF_I0, SF_I1)), shape=(g.size(g.dim), g.size(g.dim)))
            return csc_matrix(I).copy()
Example #8
    def _grad(self,values):
        """
        Gives the (sub/super)gradient of the atom w.r.t. each argument.

        Matrix expressions are vectorized, so the gradient is a matrix.

        Args:
            values: A list of numeric values for the arguments.

        Returns:
            A list of SciPy CSC sparse matrices or None.
        """
        X = np.matrix(values[0])
        P = np.matrix(values[1])
        try:
            P_inv = LA.inv(P)
        except LA.LinAlgError:
            return [None, None]
        else:
            # partial_X = (P^-1 + P^-T)X
            # partial_P = -(P^-1 * X * X^T * P^-1)^T
            DX = np.dot(P_inv + np.transpose(P_inv), X)
            DX = DX.T.ravel(order='F')
            DX = sp.csc_matrix(DX).T

            DP = P_inv.dot(X)
            DP = DP.dot(X.T)
            DP = DP.dot(P_inv)
            DP = -DP.T
            DP = sp.csc_matrix(DP.T.ravel(order='F')).T
            return [DX, DP]
Example #9
File: to_matrix.py Project: pymor/pymor
def test_to_matrix_LincombOperator():
    np.random.seed(0)
    A = np.random.randn(3, 3)
    B = np.random.randn(3, 2)
    a = np.random.randn()
    b = np.random.randn()
    C = a * A + b * B.dot(B.T)

    Aop = NumpyMatrixOperator(A)
    Bop = NumpyMatrixOperator(B)
    Cop = Aop * a + (Bop @ Bop.H) * b
    assert_type_and_allclose(C, Cop, 'dense')

    Aop = NumpyMatrixOperator(sps.csc_matrix(A))
    Bop = NumpyMatrixOperator(B)
    Cop = Aop * a + (Bop @ Bop.H) * b
    assert_type_and_allclose(C, Cop, 'dense')

    Aop = NumpyMatrixOperator(A)
    Bop = NumpyMatrixOperator(sps.csc_matrix(B))
    Cop = Aop * a + (Bop @ Bop.H) * b
    assert_type_and_allclose(C, Cop, 'dense')

    Aop = NumpyMatrixOperator(sps.csc_matrix(A))
    Bop = NumpyMatrixOperator(sps.csc_matrix(B))
    Cop = Aop * a + (Bop @ Bop.H) * b
    assert_type_and_allclose(C, Cop, 'sparse')
Example #10
File: to_matrix.py Project: pymor/pymor
def test_to_matrix_Concatenation():
    np.random.seed(0)
    A = np.random.randn(2, 3)
    B = np.random.randn(3, 4)
    C = A.dot(B)

    Aop = NumpyMatrixOperator(A)
    Bop = NumpyMatrixOperator(B)
    Cop = Aop @ Bop
    assert_type_and_allclose(C, Cop, 'dense')

    Aop = NumpyMatrixOperator(sps.csc_matrix(A))
    Bop = NumpyMatrixOperator(B)
    Cop = Aop @ Bop
    assert_type_and_allclose(C, Cop, 'dense')

    Aop = NumpyMatrixOperator(A)
    Bop = NumpyMatrixOperator(sps.csc_matrix(B))
    Cop = Aop @ Bop
    assert_type_and_allclose(C, Cop, 'dense')

    Aop = NumpyMatrixOperator(sps.csc_matrix(A))
    Bop = NumpyMatrixOperator(sps.csc_matrix(B))
    Cop = Aop @ Bop
    assert_type_and_allclose(C, Cop, 'sparse')
Example #11
def test_csc_row_median():
    # Test csc_row_median actually calculates the median.

    # Test that it gives the same output when X is dense.
    rng = np.random.RandomState(0)
    X = rng.rand(100, 50)
    dense_median = np.median(X, axis=0)
    csc = sp.csc_matrix(X)
    sparse_median = csc_median_axis_0(csc)
    assert_array_equal(sparse_median, dense_median)

    # Test that it gives the same output when X is sparse
    X = rng.rand(51, 100)
    X[X < 0.7] = 0.0
    ind = rng.randint(0, 50, 10)
    X[ind] = -X[ind]
    csc = sp.csc_matrix(X)
    dense_median = np.median(X, axis=0)
    sparse_median = csc_median_axis_0(csc)
    assert_array_equal(sparse_median, dense_median)

    # Test for toy data.
    X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
    csc = sp.csc_matrix(X)
    assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
    X = [[0, -2], [-1, -5], [1, -3]]
    csc = sp.csc_matrix(X)
    assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))

    # Test that it raises an Error for non-csc matrices.
    assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
Example #12
File: mlp.py Project: howonlee/stupidmlp
    def propagate_backward(self, target, lrate=0.01):
        ''' Back propagate error related to target using lrate. '''
        begin_time = time.perf_counter()  # time.clock() was removed in Python 3.8

        deltas = []

        # Compute error on output layer
        error = sci_sp.csc_matrix(target - self.layers[-1])
        delta = error.multiply(mat_dsigmoid(self.layers[-1]))
        deltas.append(delta)

        # Compute error on hidden layers
        for i in range(len(self.shape)-2,0,-1):
            error = sci_sp.csc_matrix(deltas[0].dot(self.weights[i].T))
            delta = error.multiply(mat_dsigmoid(self.layers[i]))
            deltas.insert(0,delta)

        # Update weights: this is the bit that scales
        for i in range(len(self.weights)):
            if i < len(self.weights)-1:
                dw = sparse_outer(self.layers[i], deltas[i], self.sparsifiers[i])
            else:
                dw = self.layers[i].T.dot(deltas[i]) ### dense at the last one
            self.weights[i] += lrate*dw

        # Return error
        end_time = time.perf_counter()
        self.bp_times.append(end_time - begin_time)
        return error.sum()
Example #13
File: geometry.py Project: cadik/opendr
    def compute_dr_wrt(self, obj):
        if obj not in (self.a, self.b):
            return None
            
        sz = self.a.r.size
        if not hasattr(self, 'indices') or self.indices.size != sz*3:
            self.indptr = np.arange(0,(sz+1)*3,3)
            idxs = col(np.arange(0,sz))
            idxs = np.hstack([idxs, idxs, idxs])
            idxs = idxs.reshape((-1,3,3))
            idxs = idxs.transpose((0,2,1)).ravel()
            self.indices = idxs

        if obj is self.a:
            # m = self.Bx
            # matvec = lambda x : _call_einsum_matvec(m, x)
            # matmat = lambda x : _call_einsum_matmat(m, x)
            # return sp.linalg.LinearOperator((self.a1.size*3, self.a1.size*3), matvec=matvec, matmat=matmat)
            data = self.Bx.ravel()
            result = sp.csc_matrix((data, self.indices, self.indptr), shape=(sz, sz))
            return -result


        elif obj is self.b:
            # m = self.Ax
            # matvec = lambda x : _call_einsum_matvec(m, x)
            # matmat = lambda x : _call_einsum_matmat(m, x)
            # return sp.linalg.LinearOperator((self.a1.size*3, self.a1.size*3), matvec=matvec, matmat=matmat)
            data = self.Ax.ravel()
            result = sp.csc_matrix((data, self.indices, self.indptr), shape=(sz, sz))
            return -result
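The example above fills a CSC matrix directly from its internal (data, indices, indptr) arrays. A small self-contained illustration of that constructor with hand-written arrays:

import numpy as np
import scipy.sparse as sp

data = np.array([1.0, 2.0, 3.0])
indices = np.array([0, 2, 1])    # row index of each stored value
indptr = np.array([0, 2, 2, 3])  # column k holds data[indptr[k]:indptr[k+1]]
M = sp.csc_matrix((data, indices, indptr), shape=(3, 3))
print(M.toarray())
# [[1. 0. 0.]
#  [0. 0. 3.]
#  [2. 0. 0.]]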
Example #14
def features_sparse (A):
	from scipy.sparse import csc_matrix
	from shogun import SparseRealFeatures
	from numpy import array, float64, all

	# sparse representation X of dense matrix A
	# note, will work with types other than float64 too,
	# but requires recent scipy.sparse
	X=csc_matrix(A)
	#print(A)

	# create sparse shogun features from dense matrix A
	a=SparseRealFeatures(A)
	a_out=a.get_full_feature_matrix()
	#print(a_out)
	assert(all(a_out==A))
	#print(a_out)

	# create sparse shogun features from sparse matrix X
	a.set_sparse_feature_matrix(X)
	a_out=a.get_full_feature_matrix()
	#print(a_out)
	assert(all(a_out==A))

	# create sparse shogun features from sparse matrix X
	a=SparseRealFeatures(X)
	a_out=a.get_full_feature_matrix()
	#print(a_out)
	assert(all(a_out==A))

	# obtain (data,row,indptr) csc arrays of sparse shogun features
	z=csc_matrix(a.get_sparse_feature_matrix())
	z_out=z.todense()
	#print(z_out)
	assert(all(z_out==A))
Example #15
    def makearrays(self):
        #convert vector which stores dictionary of features into sparse array based on fk_idx

        rows=[]
        cols=[]
        data=[]
        #thisrow=[]
        rowpointer=0

        for vector in self.vectordict.values():
            vector.rowindex=rowpointer
            temparray=numpy.zeros(self.dim)
            for feature in vector.features.keys():
                col = int(self.fk_idx[feature])
                score = float(vector.features[feature])
                temparray[col]=score
                rows.append(rowpointer)
                cols.append(col)
                data.append(score)

            vector.array = sparse.csc_matrix(temparray)
            rowpointer+=1
            #print rows
        #print cols
        #print data

        self.fullmatrix = sparse.csc_matrix((numpy.array(data),(numpy.array(rows),numpy.array(cols))))
Example #16
def majorana_invariant(intra,inter,kp=1000):
  """ Calculates the topological invariant for a 1d topological
  superconductor, returns a list of determinants of the upper diagonal"""
  raise NotImplementedError("this routine is unfinished")  # deliberate stop
  ks = np.arange(0.5,1.5,1.0/kp) # create kpoints
  dets = [0. for k in ks]  # create list with determinants
  # rotate the matrices into a non diagonal block form
  # assume that h = sz*h_0 + i sy * h_delta
  # and perform a rotation e^-i*pi/4 sy
  rot = intra * 0.0
  n = len(intra)//2 # number of orbitals including spin
  # create rotation matrix
  intra_a = np.matrix([[0.0j for i in range(n)] for j in range(n)])
  inter_a = np.matrix([[0.0j for i in range(n)] for j in range(n)])
  print(csc_matrix(intra - intra.H))
  for i in range(n):
    for j in range(n):
      # couples electron in i with hole in j
      s = 1.0
      if i<j:
        s = -1.0
      intra_a[i,j] = 1j*s*intra[2*i,2*j+1] + intra[2*i,2*j]
#      intra_a[i,j] = intra[2*i,2*j+1]
      inter_a[i,j] = 1j*s*inter[2*i,2*j+1] + inter[2*i,2*j]
#  print(csc_matrix(intra_a))
  fm = open("WINDING_MAJORANA.OUT","w")
  fm.write("# kpoint,     phase\n")
  for k in ks:
    tk = inter_a * np.exp(1j*2.*np.pi*k)
    hk = intra_a + tk + tk.H # k-dependent hamiltonian
    det = lg.det(hk)
    phi = np.arctan2(det.imag,det.real)
    fm.write(str(k)+"    "+str(phi)+"\n")
  fm.close()  # close the file
Example #17
def load_lda_dataset(uid):
    fname = join(DATASETS_FOLDER, 'es_twlda25ds_%d.npz' % uid)

    z = np.load(open(fname,'rb'))
    X_train = z['arr_0'].item()
    X_valid = z['arr_1'].item()
    X_test = z['arr_2'].item()

    # X_train = csc.csc_matrix(X_train.tolist())
    # X_valid = csc.csc_matrix(X_train.tolist())
    # X_test = csc.csc_matrix(X_test.tolist())

    cols_train = X_train.shape[1]
    cols_valid = X_valid.shape[1]
    cols_test = X_test.shape[1]

    maxcols = max(cols_train, cols_valid, cols_test)

    if cols_train < maxcols:
        missing_cols = csc_matrix((X_train.shape[0], maxcols - cols_train), dtype=np.float64)
        X_train = sp.hstack((X_train, missing_cols))

    if cols_valid < maxcols:
        missing_cols = csc_matrix((X_valid.shape[0], maxcols - cols_valid), dtype=np.float64)
        X_valid = sp.hstack((X_valid, missing_cols))

    if cols_test < maxcols:
        missing_cols = csc_matrix((X_test.shape[0], maxcols - cols_test), dtype=np.float64)
        X_test = sp.hstack((X_test, missing_cols))

    ys_fname = join(DATAFRAMES_FOLDER, "ysv_%d_small.pickle" % uid)
    y_train, y_valid, y_test = pickle.load(open(ys_fname, 'rb'))

    return X_train, X_valid, X_test, y_train, y_valid, y_test
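load_lda_dataset pads the narrower matrices with an all-zero CSC block via scipy.sparse.hstack. A compact sketch of the same idiom (the arrays here are illustrative):

import numpy as np
import scipy.sparse as sp
from scipy.sparse import csc_matrix

X = csc_matrix(np.arange(6, dtype=np.float64).reshape(3, 2))
pad = csc_matrix((X.shape[0], 2), dtype=np.float64)  # 3x2 block of zeros
X_wide = sp.hstack((X, pad)).tocsc()
print(X_wide.shape)  # (3, 4)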
Example #18
    def __init__(self, Year, pvalue = 0.01):

        A =Year.Adj
        v = float(A.sum())

        n, m = A.shape
        self.sets = (n, m)
        alpha = pvalue / float(n * m)
        in_degree = A.sum(0)
        out_degree = A.sum(1)
        i, j, aij = extract.find(A)
        
        nonzero = len(i)
        pij = np.zeros((nonzero, ))
        for h in range(nonzero):
            pij[h] = out_degree[i[h]] * in_degree[0,j[h]] / v**2
        P = 1-binom.cdf(aij - 1,v,pij)
        data = 1. * (P<= alpha)
        zero_entries = np.where(data == 0) 
        data = np.delete(data, zero_entries)
        i = np.delete(i, zero_entries)
        j = np.delete(j, zero_entries)
        aij = np.delete(aij,zero_entries)
        ij = np.asarray(list(zip(i, j))).T
        self.svnet = csc_matrix((data, ij))
        self.Adj = csc_matrix((aij,ij))
        self.filename = Year.filename
        self.edgetype = Year.edgetype
        self.banks = Year.banks
        self.firms = Year.firms
        self.descr = 'valid network'
Example #19
    def _update_hessian(self, it):
        """
        Provides quasi-newton approximation to the Hessian matrix.
        """
        it = self.it_list[-1]
        it_old = it.old
        s = it.x - it_old.x
        y = it.f_x + it.Aele_Aili - (it.old.f_x + it.old.Aele_Aili)
        hess = it.hessian

        if self.hess_approx == 'SR1':
            temp = y - it.hess * s
            temp2 = dot(temp.T, s)
            if abs(temp2) >= self.eta * norm(s) * norm(temp):
                temp = dot(temp, temp.T) / temp2
                temp = csc_matrix(temp)
                hess = hess + temp
        elif self.hess_approx == 'BFGS':
            H_s = hess * s
            s_T_H_s = dot(s.T, H_s)
            s_T_y = dot(s.T, y)
            # Choose size for theta
            if s_T_y >= 0.2 * s_T_H_s:
                theta = 1.
            else:
                theta = ((0.8 * s_T_H_s) / (s_T_H_s - s_T_y))
            r = theta * y + (1. - theta) * H_s
            temp = csc_matrix( - dot(H_s, H_s.T) / s_T_H_s + dot(r, r.T) / dot(s.T, r))
            hess = hess + temp
        else:
            raise ValueError('Invalid Hessian approximation style chosen')

        return hess
Example #20
def test_min_max_axis1():
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)

    mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
    assert_array_equal(mins_csr, X.min(axis=1))
    assert_array_equal(maxs_csr, X.max(axis=1))

    mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
    assert_array_equal(mins_csc, X.min(axis=1))
    assert_array_equal(maxs_csc, X.max(axis=1))

    X = X.astype(np.float32)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
    assert_array_equal(mins_csr, X.min(axis=1))
    assert_array_equal(maxs_csr, X.max(axis=1))
    mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
    assert_array_equal(mins_csc, X.min(axis=1))
    assert_array_equal(maxs_csc, X.max(axis=1))
Example #21
def block2full(ht,sparse=False):
  """Convert a heterostructure with block diagonal Hamiltonian
  into the full form"""
  if not ht.block_diagonal: return ht # stop
  ho = ht.copy()
  ho.block_diagonal = False # set in false from now on
  nb = len(ht.central_intra) # number of blocks
  lc = [csc_matrix(ht.central_intra[i][i].shape) for i in range(nb)]
  rc = [csc_matrix(ht.central_intra[i][i].shape) for i in range(nb)]
  lc[0] = csc_matrix(ht.left_coupling)
  rc[nb-1] = csc_matrix(ht.right_coupling)
  # convert the central to sparse form
  central = [[None for i in range(nb)] for j in range(nb)]
  for i in range(nb):
    for j in range(nb):
      if ht.central_intra[i][j] is None: continue
      else:
        central[i][j] = csc_matrix(ht.central_intra[i][j])
  from scipy.sparse import vstack
  if sparse:
    ho.left_coupling = vstack(lc)
    ho.right_coupling = vstack(rc)
    ho.central_intra = bmat(ht.central_intra) # as sparse matrix
  else:
    ho.left_coupling = vstack(lc).todense()
    ho.right_coupling = vstack(rc).todense()
    ho.central_intra = bmat(central).todense() # as dense matrix
  return ho
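block2full relies on scipy.sparse.bmat, which stitches a list-of-lists of blocks (None meaning a zero block) into one sparse matrix. A tiny stand-alone illustration:

import numpy as np
from scipy.sparse import bmat, csc_matrix

A = csc_matrix(np.eye(2))
B = csc_matrix(np.ones((2, 2)))
M = bmat([[A, None], [None, B]], format='csc')  # block-diagonal 4x4
print(M.toarray())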
Example #22
def cernikov_filter(wts, fts=None):
    """ Remove any of the working transversals that are minimal versions of each other """
    wt_i = 0
    wts = sparse.csc_matrix(wts)
    if fts is not None:
        fts = sparse.csc_matrix(fts)

    while wt_i < wts.shape[1]:
        target_t = wts[:, wt_i]
        left_t = wts[:, :max(wt_i, 0)]
        right_t = wts[:, min(wt_i+1, wts.shape[1]):]

        assert(left_t.shape[1] + right_t.shape[1] + target_t.shape[1] == wts.shape[1]), "Left/Right split failed."

        left_right_t = sparse.hstack((left_t, right_t))
        if fts is not None:
            check_ts = sparse.hstack((left_right_t, fts))
        else:
            check_ts = left_right_t

        if is_minimal_present(target_t, check_ts):
            wts = left_right_t # The new wts to loop over
            # [logic] wt_i = wt_i # Don't increase
        else:
            # [logic] wts = wts # Keep target
            wt_i += 1
    return wts
Example #23
def test_fistatree():
	params = {
		"loss":"square",
		"lambda1": 0.1, 
		"it0": 3,
		"regul":"l2",
		"max_it": 10,
		"intercept" : True
	}

	user = 10
	word = 5
	day = 3
	tasks = 1
	own_variables =  np.array([0],dtype=np.int32)
	N_own_variables =  np.array([word],dtype=np.int32)
	eta_g = np.array([1],dtype=np.float64)
	tree = {
		'eta_g': eta_g,
		'groups': ssp.csc_matrix((1,1),dtype=bool),
		'own_variables': own_variables,
		'N_own_variables': N_own_variables
	}

	ff = sf.FistaTree(tree=tree,params=params)
	x = rand(day,word)
	y = rand(day,tasks)
	w,b = ff.call(ssp.csc_matrix(x),np.asfortranarray(y))
Example #24
    def _column_grad(self, value):
        """Gives the (sub/super)gradient of the atom w.r.t. a column argument.

        Matrix expressions are vectorized, so the gradient is a matrix.

        Args:
            value: A numeric value for a column.

        Returns:
            A NumPy ndarray matrix or None.
        """
        rows = self.args[0].size[0]*self.args[0].size[1]
        value = np.matrix(value)
        # Outside domain.
        if self.p < 1 and np.any(value <= 0):
            return None
        D_null = sp.csc_matrix((rows, 1), dtype='float64')
        if self.p == 1:
            D_null += (value > 0)
            D_null -= (value < 0)
            return sp.csc_matrix(D_null.A.ravel(order='F')).T
        denominator = np.linalg.norm(value, float(self.p))
        denominator = np.power(denominator, self.p - 1)
        # Subgrad is 0 when denom is 0 (or undefined).
        if denominator == 0:
            if self.p >= 1:
                return D_null
            else:
                return None
        else:
            nominator = np.power(value, self.p - 1)
            frac = np.divide(nominator, denominator)
            return np.reshape(frac.A, (frac.size, 1))
Example #25
def test_als_warm_start():
    X, y, coef = make_user_item_regression(label_stdev=0)
    from sklearn.cross_validation import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=42)
    X_train = sp.csc_matrix(X_train)
    X_test = sp.csc_matrix(X_test)

    fm = als.FMRegression(n_iter=10, l2_reg_w=0, l2_reg_V=0, rank=2)
    fm.fit(X_train, y_train)
    y_pred = fm.predict(X_test)
    error_10_iter = mean_squared_error(y_pred, y_test)

    fm = als.FMRegression(n_iter=5, l2_reg_w=0, l2_reg_V=0, rank=2)
    fm.fit(X_train, y_train)
    print(fm.iter_count)
    y_pred = fm.predict(X_test)
    error_5_iter = mean_squared_error(y_pred, y_test)

    fm.fit(sp.csc_matrix(X_train), y_train, n_more_iter=5)
    print(fm.iter_count)
    y_pred = fm.predict(X_test)
    error_5_iter_plus_5 = mean_squared_error(y_pred, y_test)

    print(error_5_iter, error_5_iter_plus_5, error_10_iter)

    assert error_10_iter == error_5_iter_plus_5
Example #26
def prep_w_graphbit(W,T,R):
	# set up the group regul
	wrindex = arange(R*T*W).reshape([R,T,W])
	ngroups = W * (T + R)
	eta_g = ones(ngroups, dtype=np.float64)  # np.float/np.bool were removed from numpy
	groups = ssp.csc_matrix(
		(ngroups,ngroups), dtype=bool
	)

	groups_var = ssp.dok_matrix((W*R*T,ngroups),dtype=bool)
	i = 0
	logger.debug("Creating the sausage groups")
	allgs = []
	for r in range(R):
		for word in range(W):
			groups_var[wrindex[r,:,word],i] = 1
			allgs += [wrindex[r,:,word]]
			i+=1
	for t in range(T):
		for word in range(W):
			groups_var[wrindex[:,t,word],i] = 1
			allgs += [wrindex[:,t,word]]
			i+=1

	logger.debug("Done creating group_var")
	groups_var = ssp.csc_matrix(groups_var,dtype=bool)
	graph = {'eta_g': eta_g,'groups' : groups,'groups_var' : groups_var}
	return graph, allgs
Example #27
    def __init__(self, Year, pvalue = 0.01):

        A = Year.Adj
        
        n, m = A.shape
        alpha = pvalue / n / m
        in_degree = A.sum(0)
        out_degree = A.sum(1)
        i, j, wij = extract.find(A)
        indices = np.where(wij > 0)
        eps = max(wij[indices].min(),0.1)
        v = A.sum() / eps
        
        nonzero = len(i)
        pij = np.zeros((nonzero, ))
        for h in range(nonzero):
            pij[h] = out_degree[i[h]] * in_degree[0,j[h]] / v**2
        P = 1 - binom.cdf(wij - 1,v,pij)
        data = P <= alpha
        zero_entries = np.where(data == 0)
        data = np.delete(data, zero_entries)
        i = np.delete(i, zero_entries)
        j = np.delete(j, zero_entries)
        wij = np.delete(wij,zero_entries)
        ij = np.asarray(list(zip(i, j))).T
        self.svnet = csc_matrix((data, ij), shape = (n,m) )
        self.Adj = csc_matrix((wij, ij), shape = (n,m) )
        self.nodes = Year.nodes
        self.filename = Year.filename
        self.edgetype = Year.edgetype
Example #28
def volume_mass_stiffness_smooth(triangles, vertices, nb_iter=1, diffusion_step=1.0, flow_file=None):
    vertices_csc = csc_matrix(vertices)
    curvature_normal_mtx = mean_curvature_normal_matrix(triangles, vertices, area_weighted=False)
    
    if isinstance(diffusion_step, (int, float)):  # 'long' existed only in Python 2
        diffusion_step = diffusion_step*np.ones(len(vertices))
    
    if flow_file is not None:
        mem_map = np.memmap(flow_file, dtype=G_DTYPE, mode='w+', shape=(nb_iter, vertices.shape[0], vertices.shape[1]))

    for i in range(nb_iter):
        stdout.write("\r step %d on %d done" % (i,  nb_iter))
        stdout.flush()
        if flow_file is not None:
            mem_map[i] = vertices_csc.toarray()
        # get curvature_normal_matrix
        mass_mtx = mass_matrix(triangles, vertices)
        
        raise NotImplementedError()
        # (D - d*L)*y = D*x = b
        A_matrix = mass_mtx - diags(diffusion_step,0).dot(curvature_normal_mtx)
        b_matrix = mass_mtx.dot(csc_matrix(vertices_csc))
        next_vertices = spsolve(A_matrix, b_matrix)
        # test if direction is positive
        direction = next_vertices.toarray() - vertices_csc
        normal_dir = vertices_cotan_normal(triangles, next_vertices, normalize=True)
        dotv = normalize_vectors(direction).multiply(normal_dir)
        vertices_csc += direction * np.maximum(0.0, -dotv)
        # vertices_csc += direction * sigmoid(-np.arctan(dotv)*np.pi - np.pi)
        # vertices_csc += direction * softplus(-dotv)
        
    stdout.write("\r step %d on %d done \n" % (nb_iter,  nb_iter))
    return vertices_csc.toarray()
Example #29
  def test_matrices(self):

    from scipy.sparse import csc_matrix


    for D in [    
          csc_matrix(([1.0,3.0,2.0,4.0],[0,1,0,1],[0,2,4]),shape=(2,2),dtype=numpy.double),
          csc_matrix(([1,3,2,4],[0,1,0,1],[0,2,4]),shape=(2,2),dtype=int),
          numpy.matrix([[1,2],[3,4]]),
          numpy.matrix([[1,2],[3,4.0]]),
          numpy.array([[1,2],[3,4]]),
          numpy.array([[1,2],[3,4.0]]),
          DMatrix([[1,2],[3,4]]).toCsc_matrix()
        ]:
      print(D)
      d = DMatrix.ones(2,2)
      
      x = SX.sym("x",d.sparsity())
      f = SXFunction([x],[x])
      f.init()
      f.setInput(D)

      self.checkarray(f.getInput(),DMatrix([[1,2],[3,4]]))
      d.set(D)
      self.checkarray(d,DMatrix([[1,2],[3,4]]))
Example #30
    def tune_by_cv(self, orig_X, all_y, alpha_values, td_splits, n_dev_folds, reuser=None, verbose=1):
        X = sparse.csc_matrix(orig_X)
        n, p = X.shape
        codes = all_y.columns
        n_codes = len(codes)
        alphas = pd.DataFrame(np.zeros([1, n_codes]), index=['alpha'], columns=codes)
        print "order = ", self.order

        for i, code in enumerate(self.order):
            y = all_y[code].as_matrix()
            model = self.models[code]
            valid_f1_summary, best_alpha = model.tune_by_cv(X, y, alpha_values, td_splits, n_dev_folds,
                                                                        reuser=reuser, verbose=verbose)
            alphas.loc['alpha', code] = best_alpha
            predictions = model.predict(X)
            if len(predictions.shape) == 1:
                predictions = predictions.reshape((predictions.size, 1))
                predictions_sp = sparse.csc_matrix(predictions)
            elif predictions.shape[0] == 1:
                predictions_sp = sparse.csc_matrix(predictions.T)
            else:
                predictions_sp = sparse.csc_matrix(predictions)

            X = sparse.csc_matrix(sparse.hstack([X, predictions_sp]))

            if verbose > 0:
                print(i, code, y.sum(), best_alpha, valid_f1_summary.mean(axis=0)[str(best_alpha)])

        return alphas
Example #31
    def test_jw_sparse_twobody(self):
        expected = csc_matrix(([1, 1], ([6, 14], [5, 13])), shape=(16, 16))
        self.assertTrue(
            numpy.allclose(
                jordan_wigner_sparse(FermionOperator('2^ 1^ 1 3')).A,
                expected.A))
Example #32
import pandas as pd
from scipy.optimize._differentialevolution import DifferentialEvolutionSolver
from scipy.sparse import csc_matrix, csr_matrix

from bayesian_decision_tree.classification import PerpendicularClassificationTree, HyperplaneClassificationTree
from bayesian_decision_tree.hyperplane_optimization import ScipyOptimizer, RandomTwoPointOptimizer
from bayesian_decision_tree.hyperplane_optimization import SimulatedAnnealingOptimizer, RandomHyperplaneOptimizer
from bayesian_decision_tree.regression import PerpendicularRegressionTree, HyperplaneRegressionTree

# possible data matrix types/transforms that need to work for fit()
data_matrix_transforms = [
    lambda X: X,
    lambda X: csc_matrix(X),
    lambda X: csr_matrix(X),
    lambda X: pd.DataFrame(data=X, columns=['col-{}'.format(i) for i in range(len(X[0]))]),
    lambda X: pd.DataFrame(data=X, columns=['col-{}'.format(i) for i in range(len(X[0]))]).to_sparse()
]


# classification tree models in all flavours
def create_classification_trees(prior, partition_prior, prune=False):
    return [
        PerpendicularClassificationTree(partition_prior, prior, prune=prune),
        HyperplaneClassificationTree(partition_prior,
                                     prior,
                                     delta=0,
                                     prune=prune),
        HyperplaneClassificationTree(partition_prior,
                                     prior,
                                     delta=0,
                                     prune=prune,
Example #33
def determine_search_location(A,
                              dims,
                              method='ellipse',
                              min_size=3,
                              max_size=8,
                              dist=3,
                              expandCore=iterate_structure(
                                  generate_binary_structure(2, 1),
                                  2).astype(int),
                              dview=None):
    """
    compute the indices of the distance from the cm to search for the spatial component

    does it by following an ellipse from the cm or doing a step by step dilatation around the cm


    Parameters:
    ----------
    [parsed]
     cm[i]:
        center of mass of each neuron

     A[:, i]: the A of each components

     dims:
        the dimension of each A's ( same usually )

     dist:
        computed distance matrix

     dims: [optional] tuple
                x, y[, z] movie dimensions

    method: [optional] string
            method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
            if method is dilate this represents the kernel used for expansion

    min_size: [optional] int

    max_size: [optional] int

    dist: [optional] int

    dims: [optional] tuple
             x, y[, z] movie dimensions

    Returns:
    --------
    dist_indicator: np.ndarray
        distance from the cm to search for the spatial footprint

    Raise:
    -------
    Exception('You cannot pass empty (all zeros) components!')
    """

    from scipy.ndimage.morphology import grey_dilation

    # we initialize the values
    if len(dims) == 2:
        d1, d2 = dims
    elif len(dims) == 3:
        d1, d2, d3 = dims
    d, nr = np.shape(A)
    A = csc_matrix(A)
    dist_indicator = scipy.sparse.csc_matrix((d, nr), dtype=np.float32)

    if method == 'ellipse':
        Coor = dict()
        # we create a matrix of size A.x of each pixel coordinate in A.y and inverse
        if len(dims) == 2:
            Coor['x'] = np.kron(np.ones(d2), list(range(d1)))
            Coor['y'] = np.kron(list(range(d2)), np.ones(d1))
        elif len(dims) == 3:
            Coor['x'] = np.kron(np.ones(d3 * d2), list(range(d1)))
            Coor['y'] = np.kron(np.kron(np.ones(d3), list(range(d2))),
                                np.ones(d1))
            Coor['z'] = np.kron(list(range(d3)), np.ones(d2 * d1))
        if not dist == np.inf:  # determine search area for each neuron
            cm = np.zeros((nr, len(dims)))  # vector for center of mass
            Vr = []  # cell(nr,1);
            dist_indicator = []
            pars = []
            # for each dim
            for i, c in enumerate(['x', 'y', 'z'][:len(dims)]):
                # mass center in this dim = (coor*A)/sum(A)
                cm[:, i] = old_div(np.dot(Coor[c], A[:, :nr].todense()),
                                   A[:, :nr].sum(axis=0))

            # parrallelizing process of the construct ellipse function
            for i in range(nr):
                pars.append([
                    Coor, cm[i], A[:, i], Vr, dims, dist, max_size, min_size, d
                ])
            if dview is None:
                res = list(map(construct_ellipse_parallel, pars))
            else:
                if 'multiprocessing' in str(type(dview)):
                    res = dview.map_async(construct_ellipse_parallel,
                                          pars).get(4294967)
                else:
                    res = dview.map_sync(construct_ellipse_parallel, pars)
            for r in res:
                dist_indicator.append(r)

            dist_indicator = (np.asarray(dist_indicator)).squeeze().T

        else:
            raise Exception('Not implemented')
            dist_indicator = True * np.ones((d, nr))

    elif method == 'dilate':
        for i in range(nr):
            A_temp = np.reshape(A[:, i].toarray(), dims[::-1])
            if len(expandCore) > 0:
                if len(expandCore.shape) < len(dims):  # default for 3D
                    expandCore = iterate_structure(
                        generate_binary_structure(len(dims), 1), 2).astype(int)
                A_temp = grey_dilation(A_temp, footprint=expandCore)
            else:
                A_temp = grey_dilation(A_temp, [1] * len(dims))

            dist_indicator[:, i] = scipy.sparse.coo_matrix(
                np.squeeze(np.reshape(A_temp, (d, 1)))[:, None] > 0)
    else:
        raise Exception('Not implemented')
        dist_indicator = True * np.ones((d, nr))

    return dist_indicator
Example #34
def discretize(vectors,
               *,
               copy=True,
               max_svd_restarts=30,
               n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.

    Parameters
    ----------
    vectors : array-like of shape (n_samples, n_clusters)
        The embedding space of the samples.

    copy : bool, default=True
        Whether to copy vectors, or perform in-place normalization.

    max_svd_restarts : int, default=30
        Maximum number of attempts to restart SVD if convergence fails

    n_iter_max : int, default=20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached

    random_state : int, RandomState instance, default=None
        Determines random number generation for rotation matrix initialization.
        Use an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----

    The eigenvector embedding is used to iteratively search for the
    closest discrete partition.  First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated.  Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated.  These two
    calculations are performed until convergence.  The discrete partition
    matrix is returned as the clustering solution.  Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.

    """

    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    random_state = check_random_state(random_state)

    vectors = as_float_array(vectors, copy=copy)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with respect
    # to the first element.  This may have to do with constraining the
    # eigenvectors to lie in a specific quadrant to make the discretization
    # search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize the rows of the eigenvectors.  Samples should lie on the unit
    # hypersphere centered at the origin.  This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors**2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:

        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            t_discrete = np.dot(vectors, rotation)

            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps)
                    or (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
Example #35
def test_random_hasher_sparse_data():
    X, y = datasets.make_multilabel_classification(random_state=0)
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    X_transformed = hasher.fit_transform(X)
    X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
    assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
Example #36
def test_ridge_sparse_svd():
    X = sp.csc_matrix(rng.rand(100, 10))
    y = rng.rand(100)
    ridge = Ridge(solver='svd')
    assert_raises(TypeError, ridge.fit, X, y)
Example #37
def hpf(X, k, max_iter=100, init_param=None):
    ### data preparation
    X = sp.csc_matrix(X, dtype=np.float64)
    M = X.copy()
    M.data = np.full(len(M.data), 1)
    n = X.shape[0]
    d = X.shape[1]
    vtw = np.full(max_iter, 0)
    etp_r = np.full(max_iter, 0)
    etp_c = np.full(max_iter, 0)
    nbIter = max_iter
    eps = 0.000000001
    if init_param is None:  # guard: the initialization below indexes into init_param
        init_param = {'G_s': None, 'G_r': None, 'L_s': None, 'L_r': None}

    #### Hyper parameter setting
    a = 0.3
    a_ = 0.3
    c = 0.3
    c_ = 0.3
    b_ = 1.
    d_ = 1.
    k_s = a_ + k * a
    t_s = c_ + k * c

    ##### Parameter initialization

    # shape gamma_uk matrix
    if init_param['G_s'] is None:
        G_s = np.random.gamma(a_, scale=b_ / a_, size=n * k).reshape(n, k)
    else:
        G_s = init_param['G_s']

    G_s = sp.csc_matrix(G_s, dtype=np.float64)

    # rate gamma_uk matrix
    if init_param['G_r'] is None:
        G_r = np.random.gamma(a_, scale=b_ / a_, size=n * k).reshape(n, k)
    else:
        G_r = init_param['G_r']

    G_r = sp.csc_matrix(G_r, dtype=np.float64)

    # shape lamda_ik matrix
    if init_param['L_s'] is None:
        L_s = np.random.gamma(c_, scale=d_ / c_, size=d * k).reshape(d, k)

    else:
        L_s = init_param['L_s']

    L_s = sp.csc_matrix(L_s, dtype=np.float64)

    # rate lamda_ik matrix
    if init_param['L_r'] is None:
        L_r = np.random.gamma(c_, scale=d_ / c_, size=d * k).reshape(d, k)
    else:
        L_r = init_param['L_r']
    L_r = sp.csc_matrix(L_r, dtype=np.float64)

    K_r = a_ / b_ + np.sum(G_s / G_r, 1)
    T_r = c_ / d_ + np.sum(L_s / L_r, 1)

    # Learning
    for iter_ in range(1, max_iter + 1):
        # Update multinomiale parameter no need to store phi only compute the sufficient stats
        logG_r = G_r.copy()
        logG_r.data = np.log(logG_r.data)
        digaG_s = G_s.copy()
        digaG_s.data = sc.special.digamma(digaG_s.data)

        logL_r = L_r.copy()
        logL_r.data = np.log(logL_r.data)
        digaL_s = L_s.copy()
        digaL_s.data = sc.special.digamma(digaL_s.data)

        Lt = digaG_s - logG_r
        Lt.data = np.exp(Lt.data)
        Lb = digaL_s - logL_r
        Lb.data = np.exp(Lb.data)

        del logG_r
        del digaG_s
        del logL_r
        del digaL_s

        Lt = Lt.todense()
        Lb = Lb.todense()

        # Update user related parameters
        G_s = a + np.multiply(Lt, ((X / (Lt * Lb.T + eps)) * Lb))
        G_r = np.repeat(np.sum(L_s / L_r, 0), n, axis=0) + np.divide(k_s, K_r)

        K_r = a_ / b_ + np.sum(G_s / G_r, 1)
        G_s = sp.csc_matrix(G_s)
        G_r = sp.csc_matrix(G_r)

        # Update item related parameters
        L_s = c + np.multiply(Lb, ((X.T / (Lb * Lt.T + eps)) * Lt))
        L_r = np.repeat(np.sum(G_s / G_r, 0), d, axis=0) + np.divide(t_s, T_r)

        T_r = c_ / d_ + np.sum(L_s / L_r, 1)
        L_s = sp.csc_matrix(L_s)
        L_r = sp.csc_matrix(L_r)

        Lt = sp.csc_matrix(Lt)
        Lb = sp.csc_matrix(Lb)
    # End of learning

    res = {'Z': G_s / G_r, 'W': L_s / L_r, 'll': vtw}

    return res
Example #38
        print("saving model to {}model.ckpt".format(prefix))
        saver.save(session, prefix + "model.ckpt", global_step=step)
        if (NUM_FEATURES > 0):
            print("user_embeddings:\n{}".format(np.around(uemb, 3)))
            print("movie_embeddings:\n{}".format(np.around(memb, 3)))
            np.savetxt(prefix + "user_embeddings.csv.gz",
                       uemb,
                       delimiter=',',
                       fmt="%.7f")
            np.savetxt(prefix + "movie_embeddings.csv.gz",
                       memb,
                       delimiter=',',
                       fmt="%.7f")
        else:
            print("NO EMBEDDINGS")
        np.savetxt(prefix + "user_bias.csv.gz",
                   ubias,
                   delimiter=',',
                   fmt="%.7f")
        np.savetxt(prefix + "movie_bias.csv.gz",
                   mbias,
                   delimiter=',',
                   fmt="%.7f")
else:
    print("Creating sparse matrix")
    A = sparse.csc_matrix(
        sparse.coo_matrix((train_labels, (train_data[:, 0], train_data[:, 1])),
                          shape=(NUM_USERS, NUM_MOVIES)))
    print("Calculating SVD")
    uemb, s, vemb = sparse.linalg.svds(A)  # svds needs the matrix argument
Example #39
    def test_conditional_gp(self):
        ef = white_signals.MeasurementNoise(efac=parameter.Uniform(0.1, 5.0))
        tm = gp_signals.TimingModel()
        ec = gp_signals.EcorrBasisModel(log10_ecorr=parameter.Uniform(-10, -5))
        pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12),
                            gamma=parameter.Uniform(1, 7))
        rn = gp_signals.FourierBasisGP(spectrum=pl,
                                       components=10,
                                       combine=False)

        model = ef + tm + ec + rn
        pta = signal_base.PTA([model(self.psr), model(self.psr2)])

        p0 = {
            "B1855+09_basis_ecorr_log10_ecorr": -6.051740765663904,
            "B1855+09_efac": 2.9027266737466095,
            "B1855+09_red_noise_gamma": 6.9720332277819725,
            "B1855+09_red_noise_log10_A": -16.749192700991543,
            "B1937+21_basis_ecorr_log10_ecorr": -9.726747733721872,
            "B1937+21_efac": 3.959178240268702,
            "B1937+21_red_noise_gamma": 2.9030772884814797,
            "B1937+21_red_noise_log10_A": -17.978562921948992,
        }

        c = utils.ConditionalGP(pta)
        cmean = c.get_mean_coefficients(p0)

        # build index for the global coefficient vector
        idx, ntot = {}, 0
        for l, v in cmean.items():
            idx[l] = slice(ntot, ntot + len(v))
            ntot = ntot + len(v)

        # repeat the computation using the common-signal formalism
        TNrs = pta.get_TNr(p0)
        TNTs = pta.get_TNT(p0)
        phiinvs = pta.get_phiinv(p0, logdet=False, method="cliques")

        TNr = np.concatenate(TNrs)
        Sigma = sps.block_diag(TNTs, "csc") + sps.block_diag(
            [np.diag(phiinvs[0]), np.diag(phiinvs[1])])

        ch = cholesky(Sigma)
        mn = ch(TNr)
        iSigma = sps.linalg.inv(Sigma)

        # check mean values
        msg = "Conditional GP coefficient value does not match"
        for l, v in cmean.items():
            assert np.allclose(mn[idx[l]], v, atol=1e-4, rtol=1e-4), msg

        # check variances
        par = "B1937+21_linear_timing_model_coefficients"
        c1 = np.cov(
            np.array([cs[par] for cs in c.sample_coefficients(p0, n=10000)]).T)
        c2 = iSigma[idx[par], idx[par]].toarray().T
        msg = "Conditional GP coefficient variance does not match"
        assert np.allclose(c1, c2, atol=1e-4, rtol=1e-4), msg

        # check mean processes
        proc = "B1937+21_linear_timing_model"
        p1 = c.get_mean_processes(p0)[proc]
        p2 = np.dot(pta["B1937+21"]["linear_timing_model"].get_basis(),
                    mn[idx[par]])
        msg = "Conditional GP time series does not match"
        assert np.allclose(p1, p2, atol=1e-4, rtol=1e-4), msg

        # check mean of sampled processes
        p2 = np.mean(np.array(
            [pc[proc] for pc in c.sample_processes(p0, n=1000)]),
                     axis=0)
        msg = "Mean of sampled conditional GP processes does not match"
        assert np.allclose(p1, p2, atol=1e-4, rtol=1e-4)

        # now try with a common process

        crn = gp_signals.FourierBasisCommonGP(spectrum=pl,
                                              orf=utils.hd_orf(),
                                              components=10,
                                              combine=False)

        model = ef + tm + ec + crn
        pta = signal_base.PTA([model(self.psr), model(self.psr2)])

        p0 = {
            "B1855+09_basis_ecorr_log10_ecorr": -5.861847220080768,
            "B1855+09_efac": 4.588342210948306,
            "B1937+21_basis_ecorr_log10_ecorr": -9.151872649912377,
            "B1937+21_efac": 0.8947815819783302,
            "common_fourier_gamma": 6.638289750637263,
            "common_fourier_log10_A": -15.68180643904114,
        }

        c = utils.ConditionalGP(pta)
        cmean = c.get_mean_coefficients(p0)

        idx, ntot = {}, 0
        for l, v in cmean.items():
            idx[l] = slice(ntot, ntot + len(v))
            ntot = ntot + len(v)

        TNrs = pta.get_TNr(p0)
        TNTs = pta.get_TNT(p0)
        phiinvs = pta.get_phiinv(p0, logdet=False, method="cliques")

        TNr = np.concatenate(TNrs)
        Sigma = sps.block_diag(TNTs, "csc") + sps.csc_matrix(phiinvs)

        ch = cholesky(Sigma)
        mn = ch(TNr)

        msg = "Conditional GP coefficient value does not match for common GP"
        for l, v in cmean.items():
            assert np.allclose(mn[idx[l]], v)
Example #40
def qslim_decimator_transformer(mesh, factor=None, n_verts_desired=None):
    """Return a simplified version of this mesh.
    A Qslim-style approach is used here.
    :param factor: fraction of the original vertices to retain
    :param n_verts_desired: number of the original vertices to retain
    :returns: new_faces: An Fx3 array of faces, mtx: Transformation matrix
    """

    if factor is None and n_verts_desired is None:
        raise Exception('Need either factor or n_verts_desired.')

    if n_verts_desired is None:
        n_verts_desired = math.ceil(len(mesh.v) * factor)

    Qv = vertex_quadrics(mesh)

    # fill out a sparse matrix indicating vertex-vertex adjacency
    # from psbody.mesh.topology.connectivity import get_vertices_per_edge
    vert_adj = get_vertices_per_edge(mesh.v, mesh.f)
    # vert_adj = sp.lil_matrix((len(mesh.v), len(mesh.v)))
    # for f_idx in range(len(mesh.f)):
    #     vert_adj[mesh.f[f_idx], mesh.f[f_idx]] = 1

    vert_adj = sp.csc_matrix(
        (vert_adj[:, 0] * 0 + 1, (vert_adj[:, 0], vert_adj[:, 1])),
        shape=(len(mesh.v), len(mesh.v)))
    vert_adj = vert_adj + vert_adj.T
    vert_adj = vert_adj.tocoo()

    def collapse_cost(Qv, r, c, v):
        Qsum = Qv[r, :, :] + Qv[c, :, :]
        p1 = np.vstack((v[r].reshape(-1, 1), np.array([1]).reshape(-1, 1)))
        p2 = np.vstack((v[c].reshape(-1, 1), np.array([1]).reshape(-1, 1)))

        destroy_c_cost = p1.T.dot(Qsum).dot(p1)
        destroy_r_cost = p2.T.dot(Qsum).dot(p2)
        result = {
            'destroy_c_cost': destroy_c_cost,
            'destroy_r_cost': destroy_r_cost,
            'collapse_cost': min([destroy_c_cost, destroy_r_cost]),
            'Qsum': Qsum
        }
        return result

    # construct a queue of edges with costs
    queue = []
    for k in range(vert_adj.nnz):
        r = vert_adj.row[k]
        c = vert_adj.col[k]

        if r > c:
            continue

        cost = collapse_cost(Qv, r, c, mesh.v)['collapse_cost']
        heapq.heappush(queue, (cost, (r, c)))

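    # decimation uses a lazy-update heap: stale entries are detected on
    # pop (stored cost below the recomputed one) and pushed back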
    # decimate
    collapse_list = []
    nverts_total = len(mesh.v)
    faces = mesh.f.copy()
    while nverts_total > n_verts_desired:
        e = heapq.heappop(queue)
        r = e[1][0]
        c = e[1][1]
        if r == c:
            continue

        cost = collapse_cost(Qv, r, c, mesh.v)
        if cost['collapse_cost'] > e[0]:
            heapq.heappush(queue, (cost['collapse_cost'], e[1]))
            # print('found outdated cost, %.2f < %.2f' % (e[0], cost['collapse_cost']))
            continue
        else:

            # update old vert idxs to new one,
            # in queue and in face list
            if cost['destroy_c_cost'] < cost['destroy_r_cost']:
                to_destroy = c
                to_keep = r
            else:
                to_destroy = r
                to_keep = c

            collapse_list.append([to_keep, to_destroy])

            # in our face array, replace "to_destroy" vertidx with "to_keep" vertidx
            np.place(faces, faces == to_destroy, to_keep)

            # same for queue
            which1 = [
                idx for idx in range(len(queue))
                if queue[idx][1][0] == to_destroy
            ]
            which2 = [
                idx for idx in range(len(queue))
                if queue[idx][1][1] == to_destroy
            ]
            for k in which1:
                queue[k] = (queue[k][0], (to_keep, queue[k][1][1]))
            for k in which2:
                queue[k] = (queue[k][0], (queue[k][1][0], to_keep))

            Qv[r, :, :] = cost['Qsum']
            Qv[c, :, :] = cost['Qsum']

            # remove degenerate faces (any two vertex indices equal);
            # fresh names avoid shadowing the edge vertex index c above
            deg01 = faces[:, 0] == faces[:, 1]
            deg12 = faces[:, 1] == faces[:, 2]
            deg20 = faces[:, 2] == faces[:, 0]

            faces_to_keep = np.logical_not(
                np.logical_or.reduce((deg01, deg12, deg20)))
            faces = faces[faces_to_keep, :].copy()

        nverts_total = (len(np.unique(faces.flatten())))

    new_faces, mtx = _get_sparse_transform(faces, len(mesh.v))
    return new_faces, mtx
示例#41
import numpy as np
from scipy import sparse
from sklearn.linear_model import LassoCV, RidgeCV, ElasticNetCV
from sklearn.model_selection import KFold  # cross_validation module removed in sklearn 0.20

data = np.array([[int(tok) for tok in line.split('\t')[:3]]
                 for line in open('data/ml-100k/u.data')])
ij = data[:, :2]
ij -= 1  # original data is in 1-based system
values = data[:, 2]
reviews = sparse.csc_matrix((values, ij.T)).astype(float)
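# reviews[u, m] is user u's 1-5 star rating of movie m; 0 means unrated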

reg = ElasticNetCV(fit_intercept=True,
                   alphas=[0.0125, 0.025, 0.05, .125, .25, .5, 1., 2., 4.])


def movie_norm(xc):
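    # per-row mean over rated (positive) entries only, subtracted from
    # rated cells so that missing ratings stay exactly zero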
    xc = xc.copy().toarray()
    x1 = np.array([xi[xi > 0].mean() for xi in xc])
    x1 = np.nan_to_num(x1)

    for i in range(xc.shape[0]):
        xc[i] -= (xc[i] > 0) * x1[i]
    return xc, x1


def learn_for(i):
    u = reviews[i]
    us = np.delete(np.arange(reviews.shape[0]), i)
    ps, = np.where(u.toarray().ravel() > 0)
    x = reviews[us][:, ps].T
示例#42
def test_check_array():
    # accept_sparse == False
    # raise error on sparse inputs
    X = [[1, 2], [3, 4]]
    X_csr = sp.csr_matrix(X)
    with pytest.raises(TypeError):
        check_array(X_csr)

    # ensure_2d=False
    X_array = check_array([0, 1, 2], ensure_2d=False)
    assert X_array.ndim == 1
    # ensure_2d=True with 1d array
    with pytest.raises(ValueError, match="Expected 2D array,"
                                         " got 1D array instead"):
        check_array([0, 1, 2], ensure_2d=True)

    # ensure_2d=True with scalar array
    with pytest.raises(ValueError, match="Expected 2D array,"
                                         " got scalar array instead"):
        check_array(10, ensure_2d=True)

    # don't allow ndim > 3
    X_ndim = np.arange(8).reshape(2, 2, 2)
    with pytest.raises(ValueError):
        check_array(X_ndim)
    check_array(X_ndim, allow_nd=True)  # doesn't raise

    # dtype and order enforcement.
    X_C = np.arange(4).reshape(2, 2).copy("C")
    X_F = X_C.copy("F")
    X_int = X_C.astype(int)
    X_float = X_C.astype(float)
    Xs = [X_C, X_F, X_int, X_float]
    dtypes = [np.int32, int, float, np.float32, None, bool, object]
    orders = ['C', 'F', None]
    copys = [True, False]

    for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
        X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
        if dtype is not None:
            assert X_checked.dtype == dtype
        else:
            assert X_checked.dtype == X.dtype
        if order == 'C':
            assert X_checked.flags['C_CONTIGUOUS']
            assert not X_checked.flags['F_CONTIGUOUS']
        elif order == 'F':
            assert X_checked.flags['F_CONTIGUOUS']
            assert not X_checked.flags['C_CONTIGUOUS']
        if copy:
            assert X is not X_checked
        else:
            # doesn't copy if it was already good
            if (X.dtype == X_checked.dtype and
                    X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
                    and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
                assert X is X_checked

    # allowed sparse != None
    X_csc = sp.csc_matrix(X_C)
    X_coo = X_csc.tocoo()
    X_dok = X_csc.todok()
    X_int = X_csc.astype(int)
    X_float = X_csc.astype(float)

    Xs = [X_csc, X_coo, X_dok, X_int, X_float]
    accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
    for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
                                                 copys):
        with warnings.catch_warnings(record=True) as w:
            X_checked = check_array(X, dtype=dtype,
                                    accept_sparse=accept_sparse, copy=copy)
        if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
            # XXX unreached code as of v0.22
            message = str(w[0].message)
            messages = ["object dtype is not supported by sparse matrices",
                        "Can't check dok sparse matrix for nan or inf."]
            assert message in messages
        else:
            assert len(w) == 0
        if dtype is not None:
            assert X_checked.dtype == dtype
        else:
            assert X_checked.dtype == X.dtype
        if X.format in accept_sparse:
            # no change if allowed
            assert X.format == X_checked.format
        else:
            # got converted
            assert X_checked.format == accept_sparse[0]
        if copy:
            assert X is not X_checked
        else:
            # doesn't copy if it was already good
            if X.dtype == X_checked.dtype and X.format == X_checked.format:
                assert X is X_checked

    # other input formats
    # convert lists to arrays
    X_dense = check_array([[1, 2], [3, 4]])
    assert isinstance(X_dense, np.ndarray)
    # raise on too deep lists
    with pytest.raises(ValueError):
        check_array(X_ndim.tolist())
    check_array(X_ndim.tolist(), allow_nd=True)  # doesn't raise

    # convert weird stuff to arrays
    X_no_array = _NotAnArray(X_dense)
    result = check_array(X_no_array)
    assert isinstance(result, np.ndarray)
示例#43
    def test_1d5_with_spin_7particles(self):
        dimension = 1
        grid_length = 5
        n_spatial_orbitals = grid_length**dimension
        wigner_seitz_radius = 9.3

        spinless = False
        n_qubits = n_spatial_orbitals
        if not spinless:
            n_qubits *= 2
        n_particles_big = 7

        length_scale = wigner_seitz_length_scale(wigner_seitz_radius,
                                                 n_particles_big, dimension)

        self.grid3 = Grid(dimension, grid_length, length_scale)
        # Get the occupied orbitals of the plane-wave basis Hartree-Fock state.
        hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True)
        hamiltonian = normal_ordered(hamiltonian)
        hamiltonian.compress()

        occupied_states = numpy.array(
            lowest_single_particle_energy_states(hamiltonian, n_particles_big))
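        # encode the occupied orbitals as the set bits of a single
        # computational basis-state index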
        self.hf_state_index3 = numpy.sum(2**occupied_states)

        self.hf_state3 = csc_matrix(([1.0], ([self.hf_state_index3], [0])),
                                    shape=(2**n_qubits, 1))

        self.orbital_occupations3 = [
            digit == '1' for digit in bin(self.hf_state_index3)[2:]
        ][::-1]
        self.occupied_orbitals3 = [
            index for index, occupied in enumerate(self.orbital_occupations3)
            if occupied
        ]

        self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3)
        for i in range(len(self.reversed_occupied_orbitals3)):
            self.reversed_occupied_orbitals3[i] = -1 + int(
                numpy.log2(self.hf_state3.shape[0])
            ) - self.reversed_occupied_orbitals3[i]

        self.reversed_hf_state_index3 = sum(
            2**index for index in self.reversed_occupied_orbitals3)

        operator = (FermionOperator('6^ 0^ 1^ 3 5 4', 2) +
                    FermionOperator('7^ 2^ 4 1') +
                    FermionOperator('3^ 3', 2.1) +
                    FermionOperator('5^ 3^ 1 0', 7.3))
        operator = normal_ordered(operator)
        transformed_operator = normal_ordered(
            fourier_transform(operator, self.grid3, spinless))

        expected = 1.66 - 0.0615536707435j
        # Calculated with expected = expectation(get_sparse_operator(
        #    transformed_operator), self.hf_state3)
        actual = expectation_db_operator_with_pw_basis_state(
            operator, self.reversed_occupied_orbitals3, n_spatial_orbitals,
            self.grid3, spinless)

        self.assertAlmostEqual(expected, actual)
示例#44
def test_1d(L=150,
            sigma=np.array([1, .5]),
            pop_sizes=np.array([1, 1]),
            t=80,
            x0=-7):
    '''Test the analytic formula for the density of 1d skew Brownian motion against the law of the first coordinate.'''
    beta = ((sigma[1] * pop_sizes[1])**2 -
            (sigma[0] * pop_sizes[0])**2) / np.sum((sigma * pop_sizes)**2)
    mid = L // 2  # integer division: a float index would break the sparse constructor below
    position = sparse.csc_matrix(([1], ([mid + x0 + L * mid], [0])),
                                 shape=(L**2, 1))

    X = np.tile(np.arange(L), (L, 1))
    Y = np.transpose(X)

    M = migration_matrix(L, sigma, pop_sizes)
    Green = position
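    # propagate the initial point mass for t steps of the migration
    # dynamics; Green is then the transition density at time t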
    for _ in np.arange(t):
        Green = M * Green

    Green = Green.todense()
    Green = Green.reshape((L, L))

    marginal = np.array(np.sum(Green, 0))[0]  # Calculate the marginal Density

    skew_Bm = one_dim_density(t, x0,
                              np.arange(L) - mid, beta, np.sqrt(sigma[1]),
                              np.sqrt(sigma[0]))

    print("Sanity Check: Total sum Marg. density: %.4f" % np.sum(marginal))
    print("Sanity Check: Total sum Skew BM: %.4f" % np.sum(skew_Bm))
    print("Sum of absolute Differences: %.4g" %
          np.sum(np.abs(marginal - skew_Bm)))

    # Plot by Harald:
    x_vec = np.arange(L) - mid
    plt.figure()
    plt.plot(x_vec,
             marginal,
             color="cyan",
             alpha=0.8,
             label="Marginal Density: Simulated",
             linewidth=2)
    plt.plot(x_vec,
             skew_Bm,
             color="crimson",
             alpha=0.8,
             label="Skew B. Motion Theory",
             linewidth=2)
    plt.vlines(0, 0, max(skew_Bm), linewidth=2, label="Interface")
    plt.legend(loc="upper right")
    plt.xlabel(r"$\Delta$ x-Axis")
    plt.ylabel("PDF")
    plt.show()

    # comparing variance of 2nd coordinate to expected variance of 2d skew Bm

    conditional = np.multiply(Green, 1 / marginal)

    variance = np.array(np.sum(np.multiply((Y - mid)**2, conditional), 0))[0]

    def conditional_variance(z, s, x, y, t, beta, sigma1, sigma2):
        sigmaz = .5 * (sigma1 + sigma2) + np.sign(z) * .5 * (sigma1 - sigma2)
        return sigmaz**2 * np.multiply(
            one_dim_density(s, x, z, beta, sigma1, sigma2),
            one_dim_density(t - s, z, y, beta,
                            sigma1, sigma2)) / one_dim_density(
                                t, x, y, beta, sigma1, sigma2)

    def f3(s, x, y, t, beta, sigma1, sigma2):
        return np.sum(
            conditional_variance(.5 * (x + y) + np.arange(L) - mid, s, x, y, t,
                                 beta, sigma1, sigma2))

    def expected_variance(x, y, t, beta, sigma1, sigma2):
        return quad(f3, 0, t, args=(x, y, t, beta, sigma1, sigma2))[0]

    true_variance = np.zeros(L)
    for y in np.arange(L):
        # print y
        true_variance[y] = expected_variance(x0, y - mid, t, beta,
                                             np.sqrt(sigma[1]),
                                             np.sqrt(sigma[0]))

    # relative difference in the variance, weighted by the pdf of the first coordinate
    print(np.sum(marginal * np.abs(true_variance - variance) / variance))

    print("Calculate expected Variance!")
    plt.figure()
    plt.plot(np.arange(L), variance)
    plt.plot(np.arange(L), true_variance)
    plt.show()
示例#45
 def test_expectation_bad_operator_type(self):
     with self.assertRaises(TypeError):
         expectation_computational_basis_state(
             'never', csc_matrix(([1], ([6], [0])), shape=(16, 1)))
示例#46
 def test_expectation_qubit_operator_not_implemented(self):
     with self.assertRaises(NotImplementedError):
         expectation_computational_basis_state(
             QubitOperator(), csc_matrix(([1], ([6], [0])), shape=(16, 1)))
示例#47
    def test_expectation_fermion_operator_single_number_terms(self):
        operator = FermionOperator('3^ 3', 1.9) + FermionOperator('2^ 1')
        state = csc_matrix(([1], ([15], [0])), shape=(16, 1))

        self.assertAlmostEqual(
            expectation_computational_basis_state(operator, state), 1.9)
示例#48
    def test_expectation_identity_fermion_operator(self):
        operator = FermionOperator.identity() * 1.1
        state = csc_matrix(([1], ([6], [0])), shape=(16, 1))

        self.assertAlmostEqual(
            expectation_computational_basis_state(operator, state), 1.1)
示例#49
 def test_expectation_correct_zero(self):
     operator = get_sparse_operator(QubitOperator('X0'), n_qubits=2)
     vector = csc_matrix(
         ([1j, -1j, -1j, -1j], ([0, 1, 2, 3], [0, 0, 0, 0])), shape=(4, 1))
     self.assertAlmostEqual(expectation(operator, vector), 0.0)
示例#50
 def test_expectation_invalid_state_length(self):
     operator = get_sparse_operator(QubitOperator('X0'), n_qubits=2)
     vector = csc_matrix(([1j, -1j, -1j], ([0, 1, 2], [0, 0, 0])),
                         shape=(3, 1))
     with self.assertRaises(ValueError):
         expectation(operator, vector)
示例#51
# imports used by the snippet below
import sys
from os.path import dirname, realpath

import numpy
import scipy.sparse
from numpy.linalg import norm
from scipy.sparse import csc_matrix

try:
    from qpsolvers import dense_solvers, sparse_solvers
    from qpsolvers import solve_qp
except ImportError:  # run locally if not installed
    sys.path.append(dirname(realpath(__file__)) + '/..')
    from qpsolvers import dense_solvers, sparse_solvers
    from qpsolvers import solve_qp

# QP matrices
n = 500
M = scipy.sparse.lil_matrix(scipy.sparse.eye(n))
for i in range(1, n - 1):
    M[i, i + 1] = -1
    M[i, i - 1] = 1
P = csc_matrix(M.dot(M.transpose()))
q = -numpy.ones((n, ))
G = csc_matrix(-scipy.sparse.eye(n))
h = -2 * numpy.ones((n, ))
P_array = numpy.array(P.todense())
G_array = numpy.array(G.todense())
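# the benchmark QP: minimize 1/2 x^T P x + q^T x subject to G x <= h,
# i.e. a smoothed objective with every coordinate constrained to x_i >= 2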


def check_same_solutions(tol=0.05):
    sol0 = solve_qp(P, q, G, h, solver=sparse_solvers[0])
    for solver in sparse_solvers:
        sol = solve_qp(P, q, G, h, solver=solver)
        relvar = norm(sol - sol0) / norm(sol0)
        assert relvar < tol, "%s's solution offset by %.1f%%" % (solver,
                                                                 100. * relvar)
    for solver in dense_solvers:
        # dense solvers get the dense copies built above (loop body
        # completed by analogy with the sparse loop)
        sol = solve_qp(P_array, q, G_array, h, solver=solver)
        relvar = norm(sol - sol0) / norm(sol0)
        assert relvar < tol, "%s's solution offset by %.1f%%" % (solver,
                                                                 100. * relvar)
示例#52
 def test_qubit_operator_sparse_n_qubits_not_specified(self):
     expected = csc_matrix(([1, 1, 1, 1], ([1, 0, 3, 2], [0, 1, 2, 3])),
                           shape=(4, 4))
     self.assertTrue(
         numpy.allclose(
             qubit_operator_sparse(QubitOperator('X1')).A, expected.A))
示例#53
def test_incr_mean_variance_axis():
    for axis in [0, 1]:
        rng = np.random.RandomState(0)
        n_features = 50
        n_samples = 10
        data_chunks = [
            rng.randint(0, 2, size=n_features) for i in range(n_samples)
        ]

        # default params for incr_mean_variance
        last_mean = np.zeros(n_features)
        last_var = np.zeros_like(last_mean)
        last_n = np.zeros_like(last_mean, dtype=np.int64)
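        # incr_mean_variance_axis folds each new data chunk into the
        # running (mean, var, n) statistics without revisiting old data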

        # Test errors
        X = np.array(data_chunks[0])
        X = np.atleast_2d(X)
        X_lil = sp.lil_matrix(X)
        X_csr = sp.csr_matrix(X_lil)
        assert_raises(TypeError, incr_mean_variance_axis, axis, last_mean,
                      last_var, last_n)
        assert_raises(TypeError, incr_mean_variance_axis, X_lil, axis,
                      last_mean, last_var, last_n)

        # Test _incr_mean_and_var with a 1 row input
        X_means, X_vars = mean_variance_axis(X_csr, axis)
        X_means_incr, X_vars_incr, n_incr = \
            incr_mean_variance_axis(X_csr, axis, last_mean, last_var, last_n)
        assert_array_almost_equal(X_means, X_means_incr)
        assert_array_almost_equal(X_vars, X_vars_incr)
        # X.shape[axis] picks # samples
        assert_array_equal(X.shape[axis], n_incr)

        X_csc = sp.csc_matrix(X_lil)
        X_means, X_vars = mean_variance_axis(X_csc, axis)
        assert_array_almost_equal(X_means, X_means_incr)
        assert_array_almost_equal(X_vars, X_vars_incr)
        assert_array_equal(X.shape[axis], n_incr)

        # Test _incremental_mean_and_var with whole data
        X = np.vstack(data_chunks)
        X_lil = sp.lil_matrix(X)
        X_csr = sp.csr_matrix(X_lil)
        X_csc = sp.csc_matrix(X_lil)

        expected_dtypes = [(np.float32, np.float32), (np.float64, np.float64),
                           (np.int32, np.float64), (np.int64, np.float64)]

        for input_dtype, output_dtype in expected_dtypes:
            for X_sparse in (X_csr, X_csc):
                X_sparse = X_sparse.astype(input_dtype)
                last_mean = last_mean.astype(output_dtype)
                last_var = last_var.astype(output_dtype)
                X_means, X_vars = mean_variance_axis(X_sparse, axis)
                X_means_incr, X_vars_incr, n_incr = \
                    incr_mean_variance_axis(X_sparse, axis, last_mean,
                                            last_var, last_n)
                assert X_means_incr.dtype == output_dtype
                assert X_vars_incr.dtype == output_dtype
                assert_array_almost_equal(X_means, X_means_incr)
                assert_array_almost_equal(X_vars, X_vars_incr)
                assert_array_equal(X.shape[axis], n_incr)
示例#54
 def test_jw_sparse_1annihilate(self):
     expected = csc_matrix(([1, -1], ([0, 2], [1, 3])), shape=(4, 4))
     self.assertTrue(
         numpy.allclose(
             jordan_wigner_sparse(FermionOperator('1')).A, expected.A))
示例#55
    random_state = check_random_state(random_state)
    w0 = random_state.normal(mean_w0, stdev_w0)
    w = random_state.normal(mean_w, stdev_w, n_features)
    V = random_state.normal(mean_V, stdev_V, (rank, n_features))

    y = ffm_predict(w0, w, V, X)
    if label_stdev > 0:
        y = random_state.normal(y, label_stdev)

    return X, y, (w0, w, V)


if __name__ == '__main__':
    X, y, coef = make_user_item_regression(n_user=5, n_item=5, rank=2,
                                           label_stdev=2)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=42)

    from mcmc import FMRegression
    fm = FMRegression(rank=2)
    y_pred = fm.fit_predict(sp.csc_matrix(X_train), y_train,
                            sp.csc_matrix(X_test))

    print('rmse', mean_squared_error(y_pred, y_test))
    print('r2_score', r2_score(y_pred, y_test))
    np.random.shuffle(y_pred)
    print('----  shuffled pred ---------')
    print('rmse', mean_squared_error(y_pred, y_test))
    print('r2_score', r2_score(y_pred, y_test))
示例#56
 def test_jw_sparse_0create_2annihilate(self):
     expected = csc_matrix(([-1j, 1j], ([4, 6], [1, 3])), shape=(8, 8))
     self.assertTrue(
         numpy.allclose(
             jordan_wigner_sparse(FermionOperator('0^ 2', -1j)).A,
             expected.A))
示例#57
    def fit(self, l1_penalty=0.1, l2_penalty=0.1, positive_only=True):

        self.l1_penalty = l1_penalty
        self.l2_penalty = l2_penalty
        self.positive_only = positive_only

        if self.l1_penalty + self.l2_penalty != 0:
            self.l1_ratio = self.l1_penalty / (self.l1_penalty +
                                               self.l2_penalty)
        else:
            print(
                "SLIM_ElasticNet: l1_penalty+l2_penalty cannot be equal to zero, setting the ratio l1/(l1+l2) to 1.0"
            )
            self.l1_ratio = 1.0

        # initialize the ElasticNet model
        self.model = ElasticNet(alpha=self.alpha,
                                l1_ratio=self.l1_ratio,
                                positive=self.positive_only,
                                fit_intercept=False,
                                copy_X=False,
                                precompute=True,
                                selection='random',
                                max_iter=self.max_iter,
                                tol=self.tol)

        URM_train = sps.csc_matrix(self.URM_train)

        n_items = URM_train.shape[1]

        # Use array as it reduces memory requirements compared to lists
        dataBlock = 10000000

        rows = np.zeros(dataBlock, dtype=np.int32)
        cols = np.zeros(dataBlock, dtype=np.int32)
        values = np.zeros(dataBlock, dtype=np.float32)

        numCells = 0

        start_time = time.time()
        start_time_printBatch = start_time

        # fit each item's factors sequentially (not in parallel)
        for currentItem in range(n_items):

            # get the target column
            y = URM_train[:, currentItem].toarray()

            # set the j-th column of X to zero
            start_pos = URM_train.indptr[currentItem]
            end_pos = URM_train.indptr[currentItem + 1]

            current_item_data_backup = URM_train.data[start_pos:end_pos].copy()
            URM_train.data[start_pos:end_pos] = 0.0

            # fit one ElasticNet model per column
            self.model.fit(URM_train, y)

            # self.model.coef_ contains the coefficient of the ElasticNet model
            # let's keep only the non-zero values

            # Select topK values
            # Sorting is done in three steps. Faster than plain np.argsort for a large number of items
            # - Partition the data to extract the set of relevant items
            # - Sort only the relevant items
            # - Get the original item index

            nonzero_model_coef_index = self.model.sparse_coef_.indices
            nonzero_model_coef_value = self.model.sparse_coef_.data

            local_topK = min(len(nonzero_model_coef_value) - 1, self.topK)

            relevant_items_partition = (
                -nonzero_model_coef_value
            ).argpartition(local_topK)[0:local_topK]
            relevant_items_partition_sorting = np.argsort(
                -nonzero_model_coef_value[relevant_items_partition])
            ranking = relevant_items_partition[
                relevant_items_partition_sorting]

            for index in range(len(ranking)):

                if numCells == len(rows):
                    rows = np.concatenate(
                        (rows, np.zeros(dataBlock, dtype=np.int32)))
                    cols = np.concatenate(
                        (cols, np.zeros(dataBlock, dtype=np.int32)))
                    values = np.concatenate(
                        (values, np.zeros(dataBlock, dtype=np.float32)))

                rows[numCells] = nonzero_model_coef_index[ranking[index]]
                cols[numCells] = currentItem
                values[numCells] = nonzero_model_coef_value[ranking[index]]

                numCells += 1

            # finally, replace the original values of the j-th column
            URM_train.data[start_pos:end_pos] = current_item_data_backup

            if time.time() - start_time_printBatch > 300 or currentItem == n_items - 1:
                print(
                    "Processed {} ( {:.2f}% ) in {:.2f} minutes. Items per second: {:.0f}"
                    .format(currentItem + 1,
                            100.0 * float(currentItem + 1) / n_items,
                            (time.time() - start_time) / 60,
                            float(currentItem) / (time.time() - start_time)))
                sys.stdout.flush()
                sys.stderr.flush()

                start_time_printBatch = time.time()

        # generate the sparse weight matrix
        self.W_sparse = sps.csr_matrix(
            (values[:numCells], (rows[:numCells], cols[:numCells])),
            shape=(n_items, n_items),
            dtype=np.float32)
        sps.save_npz("ElasticNet-Sim.npz", self.W_sparse)
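
A minimal, self-contained sketch of the top-K selection trick used above
(argpartition isolates the K best candidates, then only that slice is
sorted); the array and K here are illustrative:

import numpy as np

scores = np.array([0.1, 0.9, 0.4, 0.7, 0.3])
K = 2
# step 1: partition so the K largest scores occupy the first K slots (unordered)
top_unsorted = (-scores).argpartition(K)[:K]
# step 2: sort only those K candidates
order = np.argsort(-scores[top_unsorted])
# step 3: map back to the original indices
ranking = top_unsorted[order]
print(ranking)  # [1 3]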
示例#58
 def test_jw_sparse_0create(self):
     expected = csc_matrix(([1], ([1], [0])), shape=(2, 2))
     self.assertTrue(
         numpy.allclose(
             jordan_wigner_sparse(FermionOperator('0^')).A, expected.A))
示例#59
 def compress(self):
     """Compress the matrix"""
     if not self.is_sparse: return
     self.intra = csc_matrix(self.intra)  # convert to csc_matrix
     self.intra.eliminate_zeros()
     self.intra = coo_matrix(self.intra)  # convert back to coo_matrix
示例#60
 def cal_E(self, extrema):
     ### store the sparse matrix in (row, col, data) triplet form ###
     row = []
     col = []
     data = []
     ### the problem reduces to solving the linear system Ax = b ###
     ### each E is the weighted average of its neighbours, E(r) = sum_s Wrs * E(s); an (mn, k) system suffices ###
     half = int(self.k / 2)
     b = np.zeros((self.r * self.l, 1))
     pbar = tqdm.tqdm(range(self.r))
     pbar.set_description('Calculating Matrix A and b...')
     self.s_pbar.set('Calculating Matrix A and b...')
     self.p1['value'] = 0
     for ii in pbar:
         self.p1['value'] += 100 / self.r
         for kk in range(self.l):
             cur_id = ii * self.l + kk
             ### the current point gets the diagonal entry A[cur_id, cur_id] = 1 ###
             row.append(cur_id)
             col.append(cur_id)
             data.append(1)
             if (ii, kk) in extrema.keys():
                 ### extremum: assign its pixel value directly to b ###
                 b[cur_id, 0] = self.img1[ii, kk]
             else:
                 ### gather the neighbouring points within the window ###
                 l = max(0, kk - half)
                 r = min(self.l - 1, kk + half)
                 u = max(0, ii - half)
                 d = min(self.r - 1, ii + half)
                 s = 1e-5
                 W = []
                 for i in range(u, d + 1):
                     for k in range(l, r + 1):
                         if i == ii and k == kk: continue
                         W.append(
                             np.exp((int(self.img1[ii, kk]) -
                                     int(self.img1[i, k]))**2 *
                                    self.sigma[ii, kk]))
                         s += W[-1]
                 index = 0
                 for i in range(u, d + 1):
                     for k in range(l, r + 1):
                         if i == ii and k == kk: continue
                         ### compute the current normalized weight ###
                         w_ik = W[index] / s
                         index += 1
                         if (i, k) in extrema.keys():
                             ### known value: move it to the right-hand side b ###
                             b[cur_id, 0] += w_ik * self.img1[i, k]
                         else:
                             ### unknown: fill in matrix A ###
                             row.append(cur_id)
                             col.append(i * self.l + k)
                             data.append(-w_ik)
     pbar.close()
     ### solve the linear system and return ###
     self.s_pbar.set('Solving Equations...')
     sp_A = csc_matrix((data, (row, col)),
                       shape=(self.r * self.l, self.r * self.l))
     return spsolve(sp_A, b).reshape((self.r, self.l))