Example #1
	def set_inv_cov(self):
		"""
		Set the inverse correlation matrix.
		
		The correlation matrix is a sum of a constant and a noise matrix. 
		The noise matrix is built from dot products of random unit vectors 
		u_1,...,u_N each in R^M, entries ~ Uniform, s.t. |u| = 1. The noise 
		matrix is dot(u, u)*epsilon for non-diag entries; 0 on the diag. 
		The constant matrix has rho in the off-diag elements and 1 on 
		the diag.
		"""
		
		self.cov = sp.ones((self.nD, self.nD))*self.rho
		sp.fill_diagonal(self.cov, 1)
		
		sp.random.seed(self.seed)
		u_vecs = sp.random.uniform(-1, 1, size=(self.nD, self.noiseD))
		u_norms = sp.sqrt(sp.sum(u_vecs**2.0, axis=1))
		u_vecs = (u_vecs.T/u_norms).T
		noise_matrix = sp.dot(u_vecs, u_vecs.T)*self.epsilon
		sp.fill_diagonal(noise_matrix, 0)
		
		self.cov += noise_matrix
		self.inv_cov = LA.inv(self.cov)
		self._diag = sp.diag(self.inv_cov)
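A minimal self-contained sketch of the same construction, assuming plain NumPy in place of the legacy `sp.*` aliases (which were removed from SciPy's namespace) and toy values standing in for the instance attributes:

import numpy as np

nD, noiseD, rho, epsilon = 5, 3, 0.3, 0.05   # stand-ins for self.nD, self.noiseD, ...

cov = np.ones((nD, nD)) * rho                # constant matrix: rho off the diagonal
np.fill_diagonal(cov, 1)                     # ... and 1 on the diagonal

rng = np.random.default_rng(0)
u_vecs = rng.uniform(-1, 1, size=(nD, noiseD))
u_vecs /= np.linalg.norm(u_vecs, axis=1, keepdims=True)   # random unit vectors
noise = u_vecs @ u_vecs.T * epsilon          # dot products of unit vectors, scaled
np.fill_diagonal(noise, 0)                   # noise only off the diagonal

inv_cov = np.linalg.inv(cov + noise)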
Example #2
    def print_verbose_message(self):
        """Method to print training statistics if Verbose is TRUE"""

        # Memory usage (does not work in Windows)
        # print('Peak memory usage: %.2f MB' % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / infer_platform() ))

        # Variance explained
        r2 = s.asarray(self.calculate_variance_explained(total=True)).mean(axis=0)
        r2[r2<0] = 0.
        print("- Variance explained:  " + "   ".join([ "View %s: %.2f%%" % (m,100*r2[m]) for m in range(self.dim["M"])]))

        # Sparsity levels of the weights
        W = self.nodes["W"].getExpectation()
        foo = [s.mean(s.absolute(W[m])<1e-3) for m in range(self.dim["M"])]
        print("- Fraction of zero weights:  " + "   ".join([ "View %s: %.0f%%" % (m,100*foo[m]) for m in range(self.dim["M"])]))

        # Correlation between factors
        Z = self.nodes["Z"].getExpectation()
        Z += s.random.normal(s.zeros(Z.shape),1e-10)
        r = s.absolute(corr(Z.T,Z.T)); s.fill_diagonal(r,0)
        print("- Maximum correlation between factors: %.2f" % (s.nanmax(r)))

        # Factor norm
        bar = s.mean(s.square(Z),axis=0)
        print("- Factor norms:  " + " ".join([ "%.2f" % bar[k] for k in range(Z.shape[1])]))

        # Tau
        tau = self.nodes["Tau"].getExpectation()
        print("- Tau per view (average):  " + "   ".join([ "View %s: %.2f" % (m,tau[m].mean()) for m in range(self.dim["M"])]))

        print("\n")
Example #3
File: tdvp_gen.py Project: bcriger/evoMPS
 def setup_A(self):
     """Initializes the state to full rank with norm 1.
     """
     for n in xrange(1, self.N + 1):
         self.A[n].fill(0)
         
         f = sp.sqrt(1. / self.q[n])
         
         if self.D[n-1] == self.D[n]:
             for s in xrange(self.q[n]):
                 sp.fill_diagonal(self.A[n][s], f)
         else:
             x = 0
             y = 0
             s = 0
             
             if self.D[n] > self.D[n - 1]:
                 f = 1.
             
             for i in xrange(max((self.D[n], self.D[n - 1]))):
                 self.A[n][s, x, y] = f
                 x += 1
                 y += 1
                 if x >= self.A[n][s].shape[0]:
                     x = 0
                     s += 1
                 elif y >= self.A[n][s].shape[1]:
                     y = 0
                     s += 1
Example #4
def coulomb_mat_eigvals(atoms, at_idx, r_cut, do_calc_connect=True, n_eigs=20):

    if do_calc_connect:
        atoms.set_cutoff(8.0)
        atoms.calc_connect()
    pos = sp.vstack((sp.asarray([sp.asarray(a.diff) for a in atoms.neighbours[at_idx]]), sp.zeros(3)))
    Z = sp.hstack((sp.asarray([atoms.z[a.j] for a in atoms.neighbours[at_idx]]), atoms.z[at_idx]))

    M = sp.outer(Z, Z) / (sp.spatial.distance_matrix(pos, pos) + np.eye(pos.shape[0]))
    sp.fill_diagonal(M, 0.5 * Z ** 2.4)

    # data = [[atoms.z[a.j], sp.asarray(a.diff)] for a in atoms.neighbours[at_idx]]
    # data.append([atoms.z[at_idx], sp.array([0,0,0])]) # central atom
    # M = sp.zeros((len(data), len(data)))
    # for i, atom1 in enumerate(data):
    #     M[i,i] = 0.5 * atom1[0] ** 2.4
    #     for j, atom2 in enumerate(data[i+1:]):
    #         j += i+1
    #         M[i,j] =  atom1[0] * atom2[0] / LA.norm(atom1[1] - atom2[1])
    # M = 0.5 * (M + M.T)
    eigs = (LA.eigh(M, eigvals_only=True))[::-1]
    if n_eigs is None:
        return eigs # all eigenvalues
    elif eigs.size >= n_eigs:
        return eigs[:n_eigs] # only the first few eigenvalues
    else:
        return sp.hstack((eigs, sp.zeros(n_eigs - eigs.size))) # zero-padded to n_eigs
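The neighbour bookkeeping above comes from an external `atoms` object (quippy-style); a dependency-free sketch of the same Coulomb-matrix descriptor, assuming only positions and nuclear charges as inputs, could look like this:

import numpy as np
from scipy.spatial import distance_matrix
from scipy.linalg import eigh

def coulomb_eigvals(pos, Z, n_eigs=20):
    # off-diagonal: Z_i*Z_j / |r_i - r_j|; eye() avoids division by zero
    M = np.outer(Z, Z) / (distance_matrix(pos, pos) + np.eye(len(Z)))
    np.fill_diagonal(M, 0.5 * Z ** 2.4)      # standard diagonal term
    eigs = eigh(M, eigvals_only=True)[::-1]  # descending eigenvalues
    if eigs.size >= n_eigs:
        return eigs[:n_eigs]
    return np.hstack((eigs, np.zeros(n_eigs - eigs.size)))  # zero-padded

pos = np.random.rand(6, 3) * 3.0             # toy coordinates
Z = np.array([6., 1., 1., 1., 1., 8.])       # toy nuclear charges
print(coulomb_eigvals(pos, Z, n_eigs=10))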
Example #5
    def initialize_state(self):
        """Initializes the state to a hard-coded full rank state with norm 1.
        """
        for n in xrange(1, self.N + 1):
            self.A[n].fill(0)

            f = sp.sqrt(1. / self.q[n])

            if self.D[n - 1] == self.D[n]:
                for s in xrange(self.q[n]):
                    sp.fill_diagonal(self.A[n][s], f)
            else:
                x = 0
                y = 0
                s = 0

                if self.D[n] > self.D[n - 1]:
                    f = 1.

                for i in xrange(max((self.D[n], self.D[n - 1]))):
                    self.A[n][s, x, y] = f
                    x += 1
                    y += 1
                    if x >= self.A[n][s].shape[0]:
                        x = 0
                        s += 1
                    elif y >= self.A[n][s].shape[1]:
                        y = 0
                        s += 1
Example #6
    def _normalize_wls(solution, corr_mtx, n_factors):
        """
        Weighted least squares normalization for loadings
        estimated using MINRES.

        Parameters
        ----------
        solution : np.array
            The solution from the L-BFGS-B optimization.
        corr_mtx : np.array
            The correlation matrix.
        n_factors : int
            The number of factors to select.

        Returns
        -------
        loadings : np.array
            The factor loading matrix.
        """
        sp.fill_diagonal(corr_mtx, 1 - solution)

        # get the eigenvalues and vectors for n_factors
        values, vectors = sp.linalg.eigh(corr_mtx)

        # take the n_factors largest eigenvalues and vectors (descending order)
        values = values[::-1][:n_factors]
        vectors = vectors[:, ::-1][:, :n_factors]

        # calculate loadings
        # if values are smaller than 0, set them to zero
        loadings = sp.dot(vectors, sp.diag(sp.sqrt(np.maximum(values, 0))))
        return loadings
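For context, a toy run of the same steps (plain NumPy, with a hypothetical uniquenesses vector standing in for the L-BFGS-B solution):

import numpy as np

corr = np.array([[1.0, 0.6, 0.5],
                 [0.6, 1.0, 0.4],
                 [0.5, 0.4, 1.0]])
solution = np.array([0.3, 0.4, 0.5])        # hypothetical uniquenesses
n_factors = 2

np.fill_diagonal(corr, 1 - solution)        # communalities on the diagonal
values, vectors = np.linalg.eigh(corr)      # ascending order
values = values[::-1][:n_factors]           # largest n_factors eigenvalues
vectors = vectors[:, ::-1][:, :n_factors]
loadings = vectors @ np.diag(np.sqrt(np.maximum(values, 0)))
print(loadings.shape)                       # (3, 2): variables x factors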
Example #7
def Gaussian(R, kernel_para, p):  ## computes matrix K
    def f(x):
        return sp.exp(-(x ** p) / (2 * kernel_para ** 2))

    res = map(f, R)
    ds = squareform(res)
    n = ds.shape[0]
    I = sp.zeros(shape=(n, n))
    sp.fill_diagonal(I, f(0))

    return ds + I
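One caveat: under Python 3, `map` returns a lazy iterator that `squareform` rejects, so this function only runs as written under Python 2. A Python 3-safe sketch of the same kernel:

import numpy as np
from scipy.spatial.distance import pdist, squareform

def gaussian_kernel(X, kernel_para=1.0, p=2):
    r = pdist(X, metric="euclidean")                 # condensed distances
    ds = squareform(np.exp(-(r ** p) / (2 * kernel_para ** 2)))
    np.fill_diagonal(ds, 1.0)                        # f(0) = exp(0) = 1
    return ds

K = gaussian_kernel(np.random.rand(5, 2))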
Example #8
def TCA(X_S, X_T, m=40, mu=0.1, kernel_para=1, p=2, random_sample_T=0.01):

    X_S = sp.mat(X_S)
    X_T = sp.mat(X_T)

    n_S = X_S.shape[0]
    n_T = X_T.shape[0]
    if random_sample_T != 1:
        print str(int(n_T * random_sample_T)) + " samples taken from the task domain"
        index_sample = sp.random.choice([i for i in range(n_T)], size=int(n_T * random_sample_T))
        X_T = X_T[index_sample, :]

        n_T = X_T.shape[0]

    n = n_S + n_T

    if m > n:
        print ("m is larger than n_S+n_T, so it has been changed")
        m = n

    L = sp.zeros(shape=(n, n))
    L_SS = sp.ones(shape=(n_S, n_S)) / (n_S ** 2)
    L_TT = sp.ones(shape=(n_T, n_T)) / (n_T ** 2)
    L_ST = -sp.ones(shape=(n_S, n_T)) / (n_S * n_T)
    L_TS = -sp.ones(shape=(n_T, n_S)) / (n_S * n_T)

    L[0:n_S, 0:n_S] = L_SS
    L[n_S : n_S + n_T, n_S : n_S + n_T] = L_TT
    L[n_S : n_S + n_T, 0:n_S] = L_TS
    L[0:n_S, n_S : n_S + n_T] = L_ST

    R = pdist(sp.vstack([X_S, X_T]), metric="euclidean", p=p, w=None, V=None, VI=None)

    K = Gaussian(R, kernel_para, p)

    Id = sp.zeros(shape=(n, n))
    H = sp.zeros(shape=(n, n))
    sp.fill_diagonal(Id, 1)
    sp.fill_diagonal(H, 1)
    H -= 1.0 / n

    Id = sp.mat(Id)
    H = sp.mat(H)
    K = sp.mat(K)
    L = sp.mat(L)

    matrix = sp.linalg.inv(K * L * K + mu * Id) * sp.mat(K * H * K)

    eigen_values = sp.linalg.eig(matrix)

    eigen_val = eigen_values[0][0:m]
    eigen_vect = eigen_values[1][:, 0:m]
    return (eigen_val, eigen_vect, K, sp.vstack([X_S, X_T]))
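A hedged usage sketch, shapes only: it assumes the Python 2 environment and the `Gaussian` helper from Example #7, plus `pdist` from scipy.spatial.distance.

X_S = sp.rand(100, 8)            # 100 source-domain samples, 8 features
X_T = sp.rand(2000, 8)           # 2000 target-domain samples
eigen_val, eigen_vect, K, X = TCA(X_S, X_T, m=10, mu=0.1,
                                  random_sample_T=0.05)
# K * eigen_vect[:, :m] gives the transfer components for the
# stacked source + sampled-target data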
Example #9
def Laplace(R, p, sigma):
    def f(x):
        return sp.exp(-(x ** p) / (2 * sigma ** 2))

    res = map(f, R)
    ds = squareform(res)
    n = ds.shape[0]
    I = sp.zeros(shape=(n, n))
    sp.fill_diagonal(I, f(0))
    M = ds + I
    d = sp.sum(M, axis=0)
    D = sp.diag(d)

    return D - M
Example #10
def H_effective(N, Omega, Sigma):  # N should be k, the dimension of the Krylov space
    Eta = (N - 2) * 0.5 * Omega
    beta = []
    for i in range(N - 1):
        beta.append(sp.sqrt(i + 1) * Sigma)

    diag = [beta, beta]
    M = sp.sparse.diags(diag, [-1, 1]).toarray()
    sp.fill_diagonal(M, -Eta)

    M[1, 0] = 0
    M[0, 1] = 0
    M[0, 0] = -1 * Eta - Omega

    return M
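A quick structural check with hypothetical small values: `sp.sparse.diags([beta, beta], [-1, 1])` lays sqrt(i+1)*Sigma on the sub- and superdiagonals, `fill_diagonal` writes -Eta down the whole diagonal, and the first basis state is then decoupled by hand:

import numpy as np
from scipy import sparse

N, Omega, Sigma = 5, 1.0, 0.2                  # hypothetical values
Eta = (N - 2) * 0.5 * Omega
beta = [np.sqrt(i + 1) * Sigma for i in range(N - 1)]

M = sparse.diags([beta, beta], [-1, 1]).toarray()
np.fill_diagonal(M, -Eta)
M[1, 0] = M[0, 1] = 0                          # decouple the first state
M[0, 0] = -Eta - Omega
print(np.round(M, 3))                          # tridiagonal apart from row/col 0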
Example #11
 def _init_arrays(self):
     self.A = sp.empty((self.N + 1), dtype=sp.ndarray) #Elements 1..N
     
     self.r = sp.empty((self.N + 1), dtype=sp.ndarray) #Elements 0..N
     self.l = sp.empty((self.N + 1), dtype=sp.ndarray)        
     
     self.r[0] = sp.zeros((self.D[0], self.D[0]), dtype=self.typ, order=self.odr)  
     self.l[0] = m.eyemat(self.D[0], dtype=self.typ)
 
     for n in xrange(1, self.N + 1):
         self.r[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
         self.l[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
         self.A[n] = sp.zeros((self.q[n], self.D[n - 1], self.D[n]), dtype=self.typ, order=self.odr)
         
     sp.fill_diagonal(self.r[self.N], 1.)        
Example #12
 def _init_arrays(self):
     self.A = sp.empty((self.N + 1), dtype=sp.ndarray) #Elements 1..N
     
     self.r = sp.empty((self.N + 1), dtype=sp.ndarray) #Elements 0..N
     self.l = sp.empty((self.N + 1), dtype=sp.ndarray)        
     
     self.r[0] = sp.zeros((self.D[0], self.D[0]), dtype=self.typ, order=self.odr)  
     self.l[0] = m.eyemat(self.D[0], dtype=self.typ)
 
     for n in range(1, self.N + 1):
         self.r[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
         self.l[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
         self.A[n] = sp.zeros((self.q[n], self.D[n - 1], self.D[n]), dtype=self.typ, order=self.odr)
         
     sp.fill_diagonal(self.r[self.N], 1.)        
Example #13
def H_effective(k, Omega, Sigma, N):
    Eta = (N - 2) * 0.5 * Omega
    beta = []

    for i in range(k - 1):
        beta.append(sp.sqrt(i + 1) * Sigma)

    diag = [beta, beta]
    M = sp.sparse.diags(diag, [-1, 1]).toarray()
    sp.fill_diagonal(M, -Eta)

    M[1, 0] = 0
    M[0, 1] = 0
    M[0, 0] = -1 * Eta - Omega
    return M
Example #14
 def _init_arrays(self):
     self.A = sp.empty((self.N + 1), dtype=sp.ndarray) #Elements 1..N
     
     self.r = sp.empty((self.N + 1), dtype=sp.ndarray) #Elements 0..N
     self.l = sp.empty((self.N + 1), dtype=sp.ndarray)        
     
     self.r[0] = sp.zeros((self.D[0], self.D[0]), dtype=self.typ, order=self.odr)  
     self.l[0] = sp.eye(self.D[0], self.D[0], dtype=self.typ).copy(order=self.odr) #Already set the 0th element (not a dummy)    
 
     for n in xrange(1, self.N + 1):
         self.r[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
         self.l[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
         self.A[n] = sp.empty((self.q[n], self.D[n - 1], self.D[n]), dtype=self.typ, order=self.odr)
         
     sp.fill_diagonal(self.r[self.N], 1.)        
Example #15
    def print_verbose_message(self, i):
        """Method to print training statistics if Verbose is TRUE"""

        # Memory usage (does not work in Windows)
        # print('Peak memory usage: %.2f MB' % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / infer_platform() ))

        # Variance explained
        r2 = s.asarray(
            self.calculate_variance_explained(total=True)).mean(axis=0)
        r2[r2 < 0] = 0.
        print("- Variance explained:  " + "   ".join([
            "View %s: %.2f%%" % (m, 100 * r2[m]) for m in range(self.dim["M"])
        ]))

        # Sparsity levels of the weights
        W = self.nodes["W"].getExpectation()
        foo = [s.mean(s.absolute(W[m]) < 1e-3) for m in range(self.dim["M"])]
        print("- Fraction of zero weights:  " + "   ".join([
            "View %s: %.0f%%" % (m, 100 * foo[m]) for m in range(self.dim["M"])
        ]))

        # Correlation between factors
        Z = self.nodes["Z"].getExpectation()
        Z += s.random.normal(s.zeros(Z.shape), 1e-10)
        r = s.absolute(corr(Z.T, Z.T))
        s.fill_diagonal(r, 0)
        print("- Maximum correlation between factors: %.2f" % (s.nanmax(r)))

        # Factor norm
        bar = s.mean(s.square(Z), axis=0)
        print("- Factor norms:  " +
              " ".join(["%.2f" % bar[k] for k in range(Z.shape[1])]))

        # Tau
        tau = self.nodes["Tau"].getExpectation()
        print("- Tau per view (average):  " + "   ".join([
            "View %s: %.2f" % (m, tau[m].mean()) for m in range(self.dim["M"])
        ]))

        #Sigma:
        if 'Sigma' in self.nodes.keys():
            sigma = self.nodes["Sigma"]
            if i >= sigma.start_opt and i % sigma.opt_freq == 0:
                print('Sigma node has been optimised:\n- Lengthscales = %s \n- Scale = %s' % \
                (np.array2string(sigma.get_ls(), precision=2, separator=", "), np.array2string(1-sigma.get_zeta(), precision=2, separator=", ")))

        print("\n")
Example #16
    def get_covariances(self, hyperparams):
        """
        Return the Cholesky decompositions L and alpha::

            K 
            L     = chol(K)
            alpha = solve(L,t)
            return [covar_struct] = get_covariances(hyperparam)
            
        **Parameters:**
        
        hyperparams: dict
            The hyperparameters for cholesky decomposition
            
        x, y: [double]
            input x and output y for cholesky decomposition.
            If one/both is/are set, there will be no caching allowed
            
        """
        if self._is_cached(hyperparams) and not self._active_set_indices_changed:
            pass
        else:
            Knoise = 0
            #1. use likelihood object to perform the inference
            if self.likelihood is not None:
                Knoise = self.likelihood.K(hyperparams['lik'], self._get_x())
            K = self.covar.K(hyperparams['covar'], self._get_x())
            K+= Knoise
            L = jitChol(K)[0].T # lower triangular
            alpha = solve_chol(L, self._get_y(hyperparams)) # TODO: not sure about this one

            # DPOTRI computes the inverse of a real symmetric positive definite
            # matrix A using the Cholesky factorization
            Linv = scipy.lib.lapack.flapack.dpotri(L)[0]
            # Copy the matrix and kill the diagonal (we don't want to have 2*var)
            Kinv = Linv.copy()
            SP.fill_diagonal(Linv, 0)
            # build the full inverse covariance matrix. This is correct: verified
            # by doing SP.allclose(Kinv, linalg.inv(K))
            Kinv += Linv.T

            self._covar_cache = {'K': K, 'L': L, 'alpha': alpha, 'Kinv': Kinv}
            # store hyperparameters for caching
            self._covar_cache['hyperparams'] = copy.deepcopy(hyperparams)
            self._active_set_indices_changed = False
        return self._covar_cache 
Example #17
    def _construct_eqns_rls(self, data):
        """Construct VAR equation system with RLS constraint.
        """
        (l, m, t) = sp.shape(data)
        n = (l - self.p) * t     # number of linear relations
        # Construct matrix x (predictor variables)
        x = sp.zeros((n + m * self.p, m * self.p))
        for i in range(m):
            for k in range(1, self.p + 1):
                x[:n, i * self.p + k - 1] = sp.reshape(data[self.p - k:-k, i, :], n)
        sp.fill_diagonal(x[n:, :], self.delta)

        # Construct vectors yi (response variables for each channel i)
        y = sp.zeros((n + m * self.p, m))
        for i in range(m):
            y[:n, i] = sp.reshape(data[self.p:, i, :], n)

        return x, y
Example #18
def _construct_var_eqns_rls(data, p, delta):
    """Construct VAR equation system with RLS constraint.
        """
    (l, m, t) = sp.shape(data)
    n = (l - p) * t  # number of linear relations
    # Construct matrix x (predictor variables)
    x = sp.zeros((n + m * p, m * p))
    for i in range(m):
        for k in range(1, p + 1):
            x[:n, i * p + k - 1] = sp.reshape(data[p - k:-k, i, :], n)
    sp.fill_diagonal(x[n:, :], delta)

    # Construct vectors yi (response variables for each channel i)
    y = sp.zeros((n + m * p, m))
    for i in range(m):
        y[:n, i] = sp.reshape(data[p:, i, :], n)

    return x, y
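In both versions the RLS trick is the appended block of m*p rows whose diagonal holds `delta`: ordinary least squares on the padded (x, y) then solves the ridge-penalised VAR problem. A toy shape-check, assuming plain NumPy in place of the legacy `sp` alias:

import numpy as np

l, m, t, p, delta = 50, 3, 4, 2, 0.1       # samples, channels, trials, order
data = np.random.randn(l, m, t)

n = (l - p) * t                            # number of linear relations
x = np.zeros((n + m * p, m * p))
for i in range(m):
    for k in range(1, p + 1):
        x[:n, i * p + k - 1] = np.reshape(data[p - k:-k, i, :], n)
np.fill_diagonal(x[n:, :], delta)          # regularisation rows

y = np.zeros((n + m * p, m))
for i in range(m):
    y[:n, i] = np.reshape(data[p:, i, :], n)

b = np.linalg.lstsq(x, y, rcond=None)[0]   # (m*p, m) VAR coefficients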
Example #19
def find_angles(markers: Sequence[Marker],
                weight_x: float = 1.0,
                weight_y: float = 1.0) -> Generator[float, None, None]:
    """Computes the angles of the given markers and returns them."""
    # Compute distance matrix.
    positions = scipy.array(list(
        (m.position.x, m.position.y) for m in markers))
    weights = scipy.array([weight_x, weight_y])
    distance_matrix = scipy.spatial.distance.pdist(positions / weights)
    distance_matrix = scipy.spatial.distance.squareform(distance_matrix)
    assert distance_matrix.shape[0] == distance_matrix.shape[1]
    num_markers = distance_matrix.shape[0]

    # Find nearest neighbors.
    scipy.fill_diagonal(distance_matrix, scipy.inf)
    nearest_neighbors = scipy.argmin(distance_matrix, axis=1)

    # Use direction to nearest neighbor as angle.
    for i in range(num_markers):
        n = nearest_neighbors[i]
        v = positions[n] - positions[i]
        yield scipy.math.atan2(v[1], v[0])
Example #20
File: lmm_slow.py Project: PMBio/envGPLVM
def nLLeval(ldelta,X,Y,K):
    """evaluate the negative LL of a LMM with kernel USU.T"""
    delta=SP.exp(ldelta)
    H = K + delta*SP.eye(X.shape[0])
    #the slow way: compute all terms needed
    L = jitChol(H)[0].T # lower triangular
    Linv = scipy.lib.lapack.flapack.dpotri(L)[0]
    Kinv = Linv.copy()
    SP.fill_diagonal(Linv, 0)
    Kinv += Linv.T
    n = X.shape[0]

    #1. get max likelihood weight beta
    pinv = SP.dot(SP.linalg.inv(SP.dot(SP.dot(X.T,Kinv),X)),X.T)
    y_   = SP.dot(Kinv,Y)
    beta = SP.dot(pinv,y_)
    #2. get max likelihood sigma
    yres = (Y-SP.dot(X,beta))
    R = SP.dot(SP.dot(yres.T,Kinv),yres)
    sigma2 = R/n

    nLL = 0.5*(-n*SP.log(2*SP.pi*sigma2) - 2*SP.log(L.diagonal()).sum() - 1./sigma2*R)
    return -nLL
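`scipy.lib.lapack.flapack` no longer exists; under modern SciPy the same Cholesky-based inverse (one triangle from DPOTRI, mirrored by hand exactly as above) can be sketched as:

import numpy as np
from scipy.linalg import lapack

H = np.array([[2.0, 0.5], [0.5, 1.0]])     # stands in for K + delta*eye
L = np.linalg.cholesky(H)                  # lower triangular factor

Linv, info = lapack.dpotri(L, lower=1)     # inverse, lower triangle filled
Kinv = Linv.copy()
np.fill_diagonal(Linv, 0)                  # kill diagonal to avoid 2*var
Kinv += Linv.T                             # mirror into the upper triangle
assert np.allclose(Kinv, np.linalg.inv(H))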
Example #21
    def _LML_Xm(self, hyperparams):
        if 'Xm' not in hyperparams.keys():
            return {}
        Q, determinant = self._compute_sparsified_covariance(hyperparams)
        
        # get correction term:
        correction_term = (1./(2.*scipy.power(self.jitter,2))) * scipy.sum(self.covar.Kdiag(hyperparams['covar'], self._get_x()) - scipy.diag(Q, 0))
        
        # add jitter
        n = self._get_x().shape[0]
        jit_mat = scipy.zeros((n,n))
        scipy.fill_diagonal(jit_mat, self.jitter)
        Q += jit_mat
        
        y = self._get_y()

        rv  = -.5*y.shape[0]*scipy.log(2*scipy.pi)
        rv -= determinant
        rv -= .5*scipy.dot(y.T, linalg.solve(Q, y))
        
        return rv - correction_term
Example #22
File: lmm_slow.py Project: afcarl/envGPLVM
def nLLeval(ldelta, X, Y, K):
    """evaluate the negative LL of a LMM with kernel USU.T"""
    delta = SP.exp(ldelta)
    H = K + delta * SP.eye(X.shape[0])
    #the slow way: compute all terms needed
    L = jitChol(H)[0].T  # lower triangular
    Linv = scipy.lib.lapack.flapack.dpotri(L)[0]
    Kinv = Linv.copy()
    SP.fill_diagonal(Linv, 0)
    Kinv += Linv.T
    n = X.shape[0]

    #1. get max likelihood weight beta
    pinv = SP.dot(SP.linalg.inv(SP.dot(SP.dot(X.T, Kinv), X)), X.T)
    y_ = SP.dot(Kinv, Y)
    beta = SP.dot(pinv, y_)
    #2. get max likelihood sigma
    yres = (Y - SP.dot(X, beta))
    R = SP.dot(SP.dot(yres.T, Kinv), yres)
    sigma2 = R / n

    nLL = 0.5 * (-n * SP.log(2 * SP.pi * sigma2) -
                 2 * SP.log(L.diagonal()).sum() - 1. / sigma2 * R)
    return (-nLL)
Example #23
    def _init_arrays(self):
        self.A = sp.empty((self.N + 1), dtype=sp.ndarray)  #Elements 1..N

        self.r = sp.empty((self.N + 1), dtype=sp.ndarray)  #Elements 0..N
        self.l = sp.empty((self.N + 1), dtype=sp.ndarray)

        self.r[0] = sp.zeros((self.D[0], self.D[0]),
                             dtype=self.typ,
                             order=self.odr)
        self.l[0] = sp.eye(self.D[0], self.D[0], dtype=self.typ).copy(
            order=self.odr)  #Already set the 0th element (not a dummy)

        for n in xrange(1, self.N + 1):
            self.r[n] = sp.zeros((self.D[n], self.D[n]),
                                 dtype=self.typ,
                                 order=self.odr)
            self.l[n] = sp.zeros((self.D[n], self.D[n]),
                                 dtype=self.typ,
                                 order=self.odr)
            self.A[n] = sp.empty((self.q[n], self.D[n - 1], self.D[n]),
                                 dtype=self.typ,
                                 order=self.odr)

        sp.fill_diagonal(self.r[self.N], 1.)
Example #24
File: tdvp_gen.py Project: bcriger/evoMPS
    def __init__(self, numsites, D, q):
        """Creates a new TDVP_MPS object.
        
        The TDVP_MPS class implements the time-dependent variational principle 
        for matrix product states for systems with open boundary conditions and
        a hamiltonian consisting of a nearest-neighbour interaction term and a 
        single-site term (external field).
        
        Bond dimensions will be adjusted where they are too high to be useful.
        FIXME: Add reference.
        
        Parameters
        ----------
        numsites : int
            The number of lattice sites.
        D : ndarray
            A 1-d array, length numsites, of integers indicating the desired bond dimensions.
        q : ndarray
            A 1-d array, also length numsites, of integers indicating the 
            dimension of the Hilbert space for each site.
        """
        self.eps = sp.finfo(self.typ).eps
        
        self.N = numsites
        self.D = sp.array(D)
        self.q = sp.array(q)
        
        #Make indices correspond to the thesis
        self.K = sp.empty((self.N + 1), dtype=sp.ndarray) #Elements 1..N
        self.C = sp.empty((self.N), dtype=sp.ndarray) #Elements 1..N-1
        self.A = sp.empty((self.N + 1), dtype=sp.ndarray) #Elements 1..N
        
        self.r = sp.empty((self.N + 1), dtype=sp.ndarray) #Elements 0..N
        self.l = sp.empty((self.N + 1), dtype=sp.ndarray)        
        
        if (self.D.ndim != 1) or (self.q.ndim != 1):
            raise ValueError('D and q must be 1-dimensional!')
            
        #TODO: Check for integer type.
        
        #Don't do anything pointless
        self.D[0] = 1
        self.D[self.N] = 1

        qacc = 1
        for n in reversed(xrange(self.N)):
            if qacc < self.D.max(): #Avoid overflow!
                qacc *= self.q[n + 1]

            if self.D[n] > qacc:
                self.D[n] = qacc
                
        qacc = 1
        for n in xrange(1, self.N + 1):
            if qacc < self.D.max(): #Avoid overflow!
                qacc *= q[n - 1]

            if self.D[n] > qacc:
                self.D[n] = qacc
        
        self.r[0] = sp.zeros((self.D[0], self.D[0]), dtype=self.typ, order=self.odr)  
        self.l[0] = sp.eye(self.D[0], self.D[0], dtype=self.typ).copy(order=self.odr) #Already set the 0th element (not a dummy)    
    
        for n in xrange(1, self.N + 1):
            self.K[n] = sp.zeros((self.D[n-1], self.D[n-1]), dtype=self.typ, order=self.odr)    
            self.r[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
            self.l[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
            self.A[n] = sp.empty((self.q[n], self.D[n-1], self.D[n]), dtype=self.typ, order=self.odr)
            if n < self.N:
                self.C[n] = sp.empty((self.q[n], self.q[n+1], self.D[n-1], self.D[n+1]), dtype=self.typ, order=self.odr)
        sp.fill_diagonal(self.r[self.N], 1.)
        self.setup_A()
        
        self.eta = sp.zeros((self.N + 1), dtype=self.typ)
Example #25
scale = 9
sd = 1.323

#petsc4py.init(sys.argv)

#diag = PETSc.Vec().create(PETSc.COMM_WORLD)
diag = PETSc.Vec().create()
diag.setSizes(n)
diag.setType('mpi')
diag.set(scale * sd)

hvals = scipy.ones(n**2) * scale

H = PETSc.Mat().createDense([n, n])
H.setValues(range(n), range(n), hvals)
H.setDiagonal(diag)
H.assemblyBegin()
H.assemblyEnd()

# Do matrix exponential with PETSc
R = expm(H, k)
print("PETSc expm")
print(R.getValues(range(n), range(n)))

# Test against SciPy solver
#T = scipy.identity(n)*scale
T = scipy.ones((n, n)) * scale
scipy.fill_diagonal(T, scale * sd)
print("SciPy expm")
print(scipy.linalg.expm(T))
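`n`, `k`, and the PETSc-based `expm` are defined earlier in the script; the SciPy reference half on its own reduces to:

import numpy as np
from scipy.linalg import expm

n, scale, sd = 4, 9, 1.323                 # hypothetical size n
T = np.ones((n, n)) * scale
np.fill_diagonal(T, scale * sd)
print(expm(T))                             # dense matrix exponential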
Example #26
def parameters(cmdargs):
    """
    cmdargs:
             -q, qubits 
             -k, lrule
             -f, nmems
    """

    # The Hopfield parameters
    hparams = {
        'numNeurons': cmdargs['qubits'],
        'inputState': [ 2*sp.random.random_integers(0,1)-1 
                        for k in xrange(cmdargs['qubits']) ],
        'learningRule': cmdargs['simtype'],
        'numMemories': int(cmdargs['farg'])
        }

    # Construct memories
    memories = [ [ 2*sp.random.random_integers(0,1)-1 
                   for k in xrange(hparams['numNeurons']) ]
                 for j in xrange(hparams['numMemories']) ]

    # At least one pattern must be one Hamming unit away from the input
    memories[0] = list(hparams['inputState'])
    memories[0][sp.random.random_integers(0,hparams['numNeurons']-1)] *= -1

    # Make sure all other patterns are not the input state
    def hamdist(a,b):
        """ Calculate Hamming distance. """
        return sp.sum(abs(sp.array(a)-sp.array(b))/2.0)
    # Loop over additional memories, if there are any
    for imem, mem in enumerate(memories[1:]):
        while hamdist(mem, hparams['inputState']) < 1.0:
            # Flip a random spin
            rndbit = sp.random.random_integers(0,hparams['numNeurons']-1)
            memories[imem+1][rndbit] *= -1
        
    # Basic simulation params
    nQubits = hparams['numNeurons']
    T = 1000.0 # sp.arange(0.1, 15, 0.5)
    # T = sp.array([10.0, 20.0, 50.0, 100.0, 500.0, 
    #               1000.0, 5000.0, 10000.0, 50000.0])
    dt = 0.005*T

    # Define states for which to track probabilities in time
    # import statelabels
    # label_list = statelabels.GenerateLabels(nQubits)
    # stateoverlap = []
    # for mem in memories:
    #     # Convert spins to bits
    #     bitstr = ''.join([ '0' if k == 1 else '1' for k in mem ])
    #     # Get the index of the current (converted) memory and add it to list
    #     stateoverlap.append([ label_list.index(bitstr), bitstr ])
    stateoverlap = None

    # Output parameters
    binary = 1 # Save output files as binary Numpy format
    progressout = 0 # Output simulation progress over anneal timesteps

    eigspecdat = 1 # Output data for eigspec
    eigspecplot = 0 # Plot eigspec
    eigspecnum = 2**nQubits # Number of eigenvalues to output
    fidelplot = 0 # Plot fidelity
    fideldat = 0 # Output fidelity data
    fidelnumstates = 2**nQubits # Check fidelity with this number of eigenstates
    overlapdat = 0 # Output overlap data
    overlapplot = 0 # Plot overlap
    solveMethod = 'ExpPert' # 'ExpPert', 'SuzTrot', 'ForRuth', 'BCM'

    # Output directory stuff
    probdir = 'data/hopfield_exp3_nodiag/n'+str(nQubits)+'p'+\
        str(hparams['numMemories'])+hparams['learningRule']
    if isinstance(T, collections.Iterable):
        probdir += 'MultiT'
    if os.path.isdir(probdir):
        outlist = sorted([ int(name) for name in os.listdir(probdir) 
                           if name.isdigit() ])
    else:
        outlist = []
    outnum = outlist[-1] + 1 if outlist else 0
    outputdir = probdir + '/' + str(outnum) + '/'

    probshow = 0 # Print final state probabilities to screen
    probout = 1 # Output probabilities to file
    mingap = 0 # Record the minimum spectral gap

    errchk = 0 # Error-checking on/off (for simulation accuracy)
    eps = 0.01 # Numerical error in normalization condition (1 - norm < eps)

    # Specify a QUBO (convert to Ising = True), or alpha, beta directly 
    # (convert = False), and also specify the signs on the Ising Hamiltonian 
    # terms (you can specify coefficients too for some problems if needed)
    isingConvert = 0
    isingSigns = {'hx': -1, 'hz': -1, 'hzz': -1}

    # Construct network Ising parameters
    neurons = nQubits

    # This is gamma, the appropriate weighting on the input vector
    # isingSigns['hz'] *= 1 - (len(hparams['inputState']) - 
    #                          hparams['inputState'].count(0))/(2*neurons)
    # isingSigns['hz'] *= 1.0/(5*neurons)
    # isingSigns['hz'] *= 0.2

    alpha = sp.array(hparams['inputState'])
    beta = sp.zeros((neurons,neurons))
    delta = sp.array([])

    # Construct the memory matrix according to a learning rule
    if hparams['learningRule'] == 'hebb':
        # Hebb rule
        isingSigns['hz'] *= 0.5
        memMat = sp.matrix(memories).T
        beta = sp.triu(memMat*memMat.T)/float(neurons)
    elif hparams['learningRule'] == 'stork':
        # Storkey rule
        isingSigns['hz'] *= 0.15
        Wm = sp.zeros((neurons,neurons))
        for m, mem in enumerate(memories):
            Am = sp.outer(mem,mem) - sp.eye(neurons)
            Wm += (Am - Am*Wm - Wm*Am)/float(neurons)
        beta = sp.triu(Wm)
    elif hparams['learningRule'] == 'proj':
        isingSigns['hz'] *= 0.15
        # Moore-Penrose pseudoinverse rule
        memMat = sp.matrix(memories).T
        beta = sp.triu(memMat * sp.linalg.pinv(memMat))
    sp.fill_diagonal(beta, 0.0)
    # Some outputs
    outputs = {
        'nQubits': nQubits,
        'learningRule': hparams['learningRule'],
        'outdir': probdir,
        'inputState': hparams['inputState'],
        'memories': memories,
        'answer': memories[0],
        'annealTime': list(T) if isinstance(T, collections.Iterable) else T
               }

    ############################################################################
    ######## All variables must be specified here, do NOT change the keys ######
    ############################################################################

    return {
        'nQubits': nQubits,
        'Q': None,
        'T': T,
        'dt': dt,
        'outputdir': outputdir,
        'errchk': errchk,
        'eps': eps,
        'isingConvert': isingConvert,
        'isingSigns': isingSigns,
        'outputs': outputs,
        'alpha': alpha,
        'beta': beta,
        'delta': delta,
        'eigdat': eigspecdat,
        'eigplot': eigspecplot,
        'eignum': eigspecnum,
        'fiddat': fideldat,
        'fidplot': fidelplot,
        'fidnumstates': fidelnumstates,
        'overlapdat': overlapdat,
        'overlapplot': overlapplot,
        'outdir': outputdir,
        'binary': binary,
        'progressout': progressout,
        'probshow': probshow,
        'probout': probout,
        'mingap': mingap,
        'stateoverlap': stateoverlap,
        'hzscale': None,
        'hzzscale': None,
        'hxscale': None,
        'solveMethod': solveMethod
        }
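The three learning rules differ only in how `beta` is built before its diagonal is zeroed; the Hebb branch, isolated with plain NumPy and hypothetical toy memories:

import numpy as np

memories = np.array([[1, -1, 1, -1],
                     [1, 1, -1, -1]])      # two 4-neuron patterns
neurons = memories.shape[1]

memMat = memories.T                        # neurons x patterns
beta = np.triu(memMat @ memMat.T) / float(neurons)
np.fill_diagonal(beta, 0.0)                # no self-coupling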
Example #27
scale = 9
sd = 1.323

#petsc4py.init(sys.argv)

#diag = PETSc.Vec().create(PETSc.COMM_WORLD)
diag = PETSc.Vec().create()
diag.setSizes(n)
diag.setType('mpi')
diag.set(scale*sd)

hvals = scipy.ones(n**2)*scale

H = PETSc.Mat().createDense([n, n])
H.setValues(range(n), range(n), hvals)
H.setDiagonal(diag)
H.assemblyBegin()
H.assemblyEnd()

# Do matrix exponential with PETSc
R = expm(H, k)
print ("PETSc expm")
print(R.getValues(range(n), range(n)))

# Test against SciPy solver
#T = scipy.identity(n)*scale
T = scipy.ones((n,n))*scale
scipy.fill_diagonal(T, scale*sd)
print ("SciPy expm")
print (scipy.linalg.expm(T))
Example #28
def SSTCA(X_S, y_S, X_T, m=40, mu=0.1, lamb=0.0001, kernel_para=1, p=2, sigma=1, gamma=0.5, random_sample_T=0.01):

    X_S = sp.mat(X_S)
    X_T = sp.mat(X_T)

    y_S = sp.array(y_S)

    n_S = X_S.shape[0]
    n_T = X_T.shape[0]
    if random_sample_T != 1:
        print str(int(n_T * random_sample_T)) + " samples taken from the task domain"
        index_sample = sp.random.choice([i for i in range(n_T)], size=int(n_T * random_sample_T))
        X_T = X_T[index_sample, :]

        n_T = X_T.shape[0]

    n = n_S + n_T

    if m > n:
        print ("m is larger than n_S+n_T, so it has been changed")
        m = n

    L = sp.zeros(shape=(n, n))
    L_SS = sp.ones(shape=(n_S, n_S)) / (n_S ** 2)
    L_TT = sp.ones(shape=(n_T, n_T)) / (n_T ** 2)
    L_ST = -sp.ones(shape=(n_S, n_T)) / (n_S * n_T)
    L_TS = -sp.ones(shape=(n_T, n_S)) / (n_S * n_T)

    L[0:n_S, 0:n_S] = L_SS
    L[n_S : n_S + n_T, n_S : n_S + n_T] = L_TT
    L[n_S : n_S + n_T, 0:n_S] = L_TS
    L[0:n_S, n_S : n_S + n_T] = L_ST

    R = pdist(sp.vstack([X_S, X_T]), metric="euclidean", p=p, w=None, V=None, VI=None)

    K = Gaussian(R, kernel_para, p)

    Id = sp.zeros(shape=(n, n))
    H = sp.zeros(shape=(n, n))
    sp.fill_diagonal(Id, 1)
    sp.fill_diagonal(H, 1)
    H -= 1.0 / n

    LA = Laplace(R, p, sigma)

    K_hat_y = sp.zeros(shape=(n, n))
    K_hat_y[0, 0] = 1
    for i in range(1, n_S):
        K_hat_y[i, i] = 1
        for j in range(i):
            if y_S[i] == y_S[j]:
                K_hat_y[i, j] = 1
                K_hat_y[j, i] = 1

    K_hat_y = gamma * K_hat_y + (1 - gamma) * Id
    Id = sp.mat(Id)
    H = sp.mat(H)
    K = sp.mat(K)
    L = sp.mat(L)
    LA = sp.mat(LA)

    matrix = sp.linalg.inv(K * (L + lamb * LA) * K + mu * Id) * sp.mat(K * H * K_hat_y * H * K)

    eigen_values = sp.linalg.eig(matrix)

    eigen_val = eigen_values[0][0:m]
    eigen_vect = eigen_values[1][:, 0:m]
    return (eigen_val, eigen_vect, K, LA, K_hat_y, sp.vstack([X_S, X_T]))
Example #29
tau_sol_vec_Mon = []

avg = {}
num_infeasible = sp.zeros(len(range_num_d2d_pairs))
for prin in range_num_d2d_pairs:
    num_d2d_pairs = prin
    # rmin = sp.multiply(0.2, sp.log(2))
    time_sol_vec = []
    EE_sol_vec = []
    tau_sol_vec = []

    for Mon in xrange(max_chan_realizaion):
        try:
            max_d2d_to_d2d_gains_diff = sp.copy(max_d2d_to_d2d_gains[:, :,
                                                                     Mon])
            sp.fill_diagonal(max_d2d_to_d2d_gains_diff, 0)
            max_d2d_to_d2d_gains_diag = sp.subtract(
                max_d2d_to_d2d_gains[:, :, Mon], max_d2d_to_d2d_gains_diff)

            uav_to_d2d_gains = max_uav_to_d2d_gains[:num_d2d_pairs, Mon]
            d2d_to_d2d_gains = max_d2d_to_d2d_gains[:num_d2d_pairs, :
                                                    num_d2d_pairs, Mon]
            d2d_to_d2d_gains_diff = max_d2d_to_d2d_gains_diff[:num_d2d_pairs, :
                                                              num_d2d_pairs]
            d2d_to_d2d_gains_diag = sp.subtract(d2d_to_d2d_gains,
                                                d2d_to_d2d_gains_diff)

            # ############################################################
            # This code is used to find the initial point for EEmax algorithm
            # ############################################################
Example #30
def get_avg_sep():
    real = tau_class.TauClass('../../data/delta-2.fits.gz')
    real.get_data(skewers_perc=1., ylabel='DELTA')

    avg_r = real.pixel_data[:, 2].mean()
    print("Average distance is {}".format(avg_r))

    ra, dec = real.q_loc[:, 0], real.q_loc[:, 1]

    delta_ra = sp.abs(ra - ra[:, None])
    delta_dec = sp.abs(dec - dec[:, None])

    angle = 2 * sp.arcsin(
        sp.sqrt(
            sp.sin(delta_dec / 2.)**2 +
            sp.cos(dec) * sp.cos(dec[:, None]) * sp.sin(delta_ra / 2.)**2))

    # remove self-distances
    sp.fill_diagonal(angle, sp.inf)

    min_angle = angle.min(0) * 180 / sp.pi
    avg_angle = min_angle.mean()

    p_cmd = "Average distance is {} h^-1 Mpc \n".format(avg_r)
    p_cmd += "Average angle is {} degree \n".format(avg_angle)

    avg_sep = avg_angle * avg_r * sp.pi / 180
    p_cmd += "Average transverse separation is {} h^-1 Mpc \n".format(avg_sep)

    print(p_cmd)
    # *************************************************************************

    simul = tau_class.TauClass('../../data/simulations/'
                               'v6.0.1_delta_transmission_RMplate.fits')
    simul.get_data(skewers_perc=1.)

    avg_r = simul.pixel_data[:, 2].mean()
    print("Average distance is {}".format(avg_r))

    ra, dec = sp.deg2rad(simul.q_loc[:, 0]), sp.deg2rad(simul.q_loc[:, 1])

    delta_ra = sp.abs(ra - ra[:, None])
    delta_dec = sp.abs(dec - dec[:, None])

    angle = 2 * sp.arcsin(
        sp.sqrt(
            sp.sin(delta_dec / 2.)**2 +
            sp.cos(dec) * sp.cos(dec[:, None]) * sp.sin(delta_ra / 2.)**2))

    # remove self-distances
    sp.fill_diagonal(angle, sp.inf)

    min_angle = angle.min(0) * 180 / sp.pi
    avg_angle = min_angle.mean()

    p_cmd = "Average distance is {} h^-1 Mpc \n".format(avg_r)
    p_cmd += "Average angle is {} degree \n".format(avg_angle)

    avg_sep = avg_angle * avg_r * sp.pi / 180
    p_cmd += "Average transverse separation is {} h^-1 Mpc \n".format(avg_sep)

    print(p_cmd)
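The pairwise `angle` above is the haversine great-circle formula; stripped of the `TauClass` I/O, the nearest-neighbour separation step reduces to (assuming RA/dec already in radians):

import numpy as np

rng = np.random.default_rng(1)
ra = rng.uniform(0, 0.1, 50)               # toy sky positions, radians
dec = rng.uniform(-0.05, 0.05, 50)

delta_ra = np.abs(ra - ra[:, None])
delta_dec = np.abs(dec - dec[:, None])
angle = 2 * np.arcsin(np.sqrt(
    np.sin(delta_dec / 2.) ** 2 +
    np.cos(dec) * np.cos(dec[:, None]) * np.sin(delta_ra / 2.) ** 2))

np.fill_diagonal(angle, np.inf)            # remove self-distances
avg_angle = (angle.min(0) * 180 / np.pi).mean()   # degrees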
Example #31
        # extract all properties in a list of floats
        properties = [float(p) for i,p in enumerate(f.readline().split()) if i > 0]

        # construct molecule from species x y z charge
        molecule = []
        try:
            for j in range(natoms):
                # molecule.append([float(x) for i, x in enumerate(f.readline().split()) if i > 0])
                molecule.append([x for i, x in enumerate(f.readline().split())])
            molecule = sp.array(molecule)
            nuc_charge = species_to_nuc_charge(molecule[:,0])
            molecule = sp.array(molecule[:,1:], dtype='float64')
            pos = molecule[:,:-1]
            # construct Coulomb matrix
            X = sp.outer(nuc_charge, nuc_charge) / (dist.squareform(dist.pdist(pos)) + sp.eye(natoms))
            sp.fill_diagonal(X, 0.5*sp.absolute(nuc_charge)**2.4)
            # add all properties of current molecule to dataset
            [dataset[k].append(properties[ordering[i]]) for i, k in enumerate(dataset.keys())]
            dataset['idx'][-1] = int(dataset['idx'][-1]) # index is an integer

            X_all.append(X)
            charge.append(molecule[:,-1])
        except Exception as err:
            print "Molecule %s skipped, malformed file" % file
            print err
            pass

        
dataset['X'] = X_all
dataset['charge'] = charge
for k,w in dataset.iteritems():
Example #32
        # construct molecule from species x y z charge
        molecule = []
        try:
            for j in range(natoms):
                # molecule.append([float(x) for i, x in enumerate(f.readline().split()) if i > 0])
                molecule.append(
                    [x for i, x in enumerate(f.readline().split())])
            molecule = sp.array(molecule)
            nuc_charge = species_to_nuc_charge(molecule[:, 0])
            molecule = sp.array(molecule[:, 1:], dtype='float64')
            pos = molecule[:, :-1]
            # construct Coulomb matrix
            X = sp.outer(nuc_charge, nuc_charge) / (
                dist.squareform(dist.pdist(pos)) + sp.eye(natoms))
            sp.fill_diagonal(X, 0.5 * sp.absolute(nuc_charge)**2.4)
            # add all properties of current molecule to dataset
            [
                dataset[k].append(properties[ordering[i]])
                for i, k in enumerate(dataset.keys())
            ]
            dataset['idx'][-1] = int(dataset['idx'][-1])  # index is an integer

            X_all.append(X)
            charge.append(molecule[:, -1])
        except Exception as err:
            print "Molecule %s skipped, malformed file" % file
            print err
            pass

dataset['X'] = X_all
Example #33
def main():
    # Data structure: train_sigs contains all signatures from the folder enrollment
    # train_sigs[writerID] contains the five signatures of person writerID
    # train_sigs[writerID][n] contains the nth signature of person writerID

    train_sigs = read_signature_files(train_directory, "enrollment")
    test_sigs = read_signature_files(test_directory, "validation")

    n_test_files = 0
    for writerID in train_sigs:
        for i in train_sigs[writerID]:
            train_sigs[writerID][i] = compute_features(train_sigs[writerID][i])
        for i in test_sigs[writerID]:
            test_sigs[writerID][i] = compute_features(test_sigs[writerID][i])
            n_test_files += 1

    # Compute relevant DTW distances on test set
    try:
        import multiprocessing as mp
        n_threads = mp.cpu_count()
        n_threads = min([n_threads, 16])
        n_threads = max([n_threads, 2])

        distance_dict = {}
        with mp.Pool(n_threads) as pool:
            for writerID in test_sigs:
                distance_dict[writerID] = pool.apply_async(
                    compare_test_train_set, (test_sigs, train_sigs, writerID))
            pool.close()
            pool.join()
        for writerID in distance_dict:
            distance_dict[writerID] = distance_dict[writerID].get()
    except:
        print("Multiprocessing not supported, falling back to single thread.")
        distance_dict = {}
        for writerID in test_sigs:
            distance_dict[writerID] = compare_test_train_set(
                test_sigs, train_sigs, writerID)

    # Compute statistics to be fed to classifier

    stats = pd.DataFrame(columns=[
        "writer", "signature", "min_dist", "mean_dist", "max_dist", "rms_dist",
        "min_mean_internal", "max_mean_internal", "verdict", "rms_internal",
        "mean_internal"
    ],
                         index=range(n_test_files))

    stats.loc[:]["min_dist"] = pd.to_numeric(stats.loc[:]["min_dist"])
    stats.loc[:]["max_dist"] = pd.to_numeric(stats.loc[:]["max_dist"])
    stats.loc[:]["min_mean_internal"] = pd.to_numeric(
        stats.loc[:]["min_mean_internal"])
    stats.loc[:]["max_mean_internal"] = pd.to_numeric(
        stats.loc[:]["max_mean_internal"])
    stats.loc[:]["mean_dist"] = pd.to_numeric(stats.loc[:]["mean_dist"])
    stats.loc[:]["rms_dist"] = pd.to_numeric(stats.loc[:]["mean_dist"])
    stats.loc[:]["rms_internal"] = pd.to_numeric(stats.loc[:]["rms_internal"])
    stats.loc[:]["mean_internal"] = pd.to_numeric(
        stats.loc[:]["mean_internal"])

    index = 0
    for writerID in test_sigs:
        for i in test_sigs[writerID]:
            dissimilarities = distance_dict[writerID][i]
            stats.loc[index, [
                'writer', 'signature', 'min_dist', 'mean_dist', 'max_dist',
                'rms_dist'
            ]] = writerID, i, dissimilarities.min(), dissimilarities.mean(
            ), dissimilarities.max(), sc.sqrt((dissimilarities**2).mean())
            index += 1

    # Compute characteristics of training signatures
    for writerID in train_sigs:
        names = list(train_sigs[writerID].keys())
        distance_matrix = sc.zeros([len(names), len(names)])
        root_mean_square = 0.
        mean = 0.
        for i in range(len(names) - 1):
            for j in range(i + 1, len(names)):
                distance_matrix[j, i] = distance_matrix[i, j] = \
                        (DTWDistance(train_sigs[writerID][names[i]], train_sigs[writerID][names[j]]))
                root_mean_square += distance_matrix[i, j]**2
                mean += distance_matrix[i, j]
        root_mean_square /= sc.math.factorial(len(names) - 1)
        mean /= sc.math.factorial(len(names) - 1)

        stats.loc[stats["writer"] == writerID,
                  "rms_internal"] = sc.sqrt(root_mean_square)
        stats.loc[stats["writer"] == writerID, "mean_internal"] = mean
        stats.loc[stats["writer"] == writerID, "max_mean_internal"] = \
                (distance_matrix.max(1)).mean()
        sc.fill_diagonal(distance_matrix, sc.inf)
        stats.loc[stats["writer"] == writerID, "min_mean_internal"] = \
                (distance_matrix.min(1)).mean()

    #stats["diff_min"]  = stats["min_dist"]  / stats["min_mean_internal"];
    #stats["diff_max"]  = stats["max_dist"]  / stats["max_mean_internal"];
    #stats["diff_mean"] = stats["mean_dist"] / stats["mean_internal"];
    stats["diff_rms"] = stats["rms_dist"] / stats["rms_internal"]

    stats.sort_values(by=["writer", "signature"])
    outputFile = open(os.path.join(".", "sig_ver_prediction.txt"), 'w')

    for writerID in test_sigs.keys():
        outputFile.write(writerID + ', ')
        for sigID in test_sigs[writerID].keys():
            outputFile.write(sigID + ', {}, '.format(
                float(stats.loc[(stats['writer'] == writerID)
                                & (stats['signature'] == sigID)]['diff_rms'])))
        outputFile.write('\n')
Example #34
def parameters(cmdargs):
    """
    cmdargs:
             -q, qubits 
             -k, lrule
             -f, nmems
    """

    # The Hopfield parameters
    hparams = {
        'numNeurons':
        cmdargs['qubits'],
        'inputState': [
            2 * sp.random.random_integers(0, 1) - 1
            for k in xrange(cmdargs['qubits'])
        ],
        'learningRule':
        cmdargs['simtype'],
        'numMemories':
        int(cmdargs['farg'])
    }

    # Construct memories
    memories = [[
        2 * sp.random.random_integers(0, 1) - 1
        for k in xrange(hparams['numNeurons'])
    ] for j in xrange(hparams['numMemories'])]

    # At least one pattern must be one Hamming unit away from the input
    memories[0] = list(hparams['inputState'])
    memories[0][sp.random.random_integers(0, hparams['numNeurons'] - 1)] *= -1

    # Make sure all other patterns are not the input state
    def hamdist(a, b):
        """ Calculate Hamming distance. """
        return sp.sum(abs(sp.array(a) - sp.array(b)) / 2.0)

    # Loop over additional memories, if there are any
    for imem, mem in enumerate(memories[1:]):
        while hamdist(mem, hparams['inputState']) < 1.0:
            # Flip a random spin
            rndbit = sp.random.random_integers(0, hparams['numNeurons'] - 1)
            memories[imem + 1][rndbit] *= -1

    # Basic simulation params
    nQubits = hparams['numNeurons']
    T = 1000.0  # sp.arange(0.1, 15, 0.5)
    # T = sp.array([10.0, 20.0, 50.0, 100.0, 500.0,
    #               1000.0, 5000.0, 10000.0, 50000.0])
    dt = 0.005 * T

    # Define states for which to track probabilities in time
    # import statelabels
    # label_list = statelabels.GenerateLabels(nQubits)
    # stateoverlap = []
    # for mem in memories:
    #     # Convert spins to bits
    #     bitstr = ''.join([ '0' if k == 1 else '1' for k in mem ])
    #     # Get the index of the current (converted) memory and add it to list
    #     stateoverlap.append([ label_list.index(bitstr), bitstr ])
    stateoverlap = None

    # Output parameters
    binary = 1  # Save output files as binary Numpy format
    progressout = 0  # Output simulation progress over anneal timesteps

    eigspecdat = 1  # Output data for eigspec
    eigspecplot = 0  # Plot eigspec
    eigspecnum = 2**nQubits  # Number of eigenvalues to output
    fidelplot = 0  # Plot fidelity
    fideldat = 0  # Output fidelity data
    fidelnumstates = 2**nQubits  # Check fidelity with this number of eigenstates
    overlapdat = 0  # Output overlap data
    overlapplot = 0  # Plot overlap
    solveMethod = 'ExpPert'  # 'ExpPert', 'SuzTrot', 'ForRuth', 'BCM'

    # Output directory stuff
    probdir = 'data/hopfield_exp3_nodiag/n'+str(nQubits)+'p'+\
        str(hparams['numMemories'])+hparams['learningRule']
    if isinstance(T, collections.Iterable):
        probdir += 'MultiT'
    if os.path.isdir(probdir):
        outlist = sorted(
            [int(name) for name in os.listdir(probdir) if name.isdigit()])
    else:
        outlist = []
    outnum = outlist[-1] + 1 if outlist else 0
    outputdir = probdir + '/' + str(outnum) + '/'

    probshow = 0  # Print final state probabilities to screen
    probout = 1  # Output probabilities to file
    mingap = 0  # Record the minimum spectral gap

    errchk = 0  # Error-checking on/off (for simulation accuracy)
    eps = 0.01  # Numerical error in normalization condition (1 - norm < eps)

    # Specify a QUBO (convert to Ising = True), or alpha, beta directly
    # (convert = False), and also specify the signs on the Ising Hamiltonian
    # terms (you can specify coefficients too for some problems if needed)
    isingConvert = 0
    isingSigns = {'hx': -1, 'hz': -1, 'hzz': -1}

    # Construct network Ising parameters
    neurons = nQubits

    # This is gamma, the appropriate weighting on the input vector
    # isingSigns['hz'] *= 1 - (len(hparams['inputState']) -
    #                          hparams['inputState'].count(0))/(2*neurons)
    # isingSigns['hz'] *= 1.0/(5*neurons)
    # isingSigns['hz'] *= 0.2

    alpha = sp.array(hparams['inputState'])
    beta = sp.zeros((neurons, neurons))
    delta = sp.array([])

    # Construct the memory matrix according to a learning rule
    if hparams['learningRule'] == 'hebb':
        # Hebb rule
        isingSigns['hz'] *= 0.5
        memMat = sp.matrix(memories).T
        beta = sp.triu(memMat * memMat.T) / float(neurons)
    elif hparams['learningRule'] == 'stork':
        # Storkey rule
        isingSigns['hz'] *= 0.15
        Wm = sp.zeros((neurons, neurons))
        for m, mem in enumerate(memories):
            Am = sp.outer(mem, mem) - sp.eye(neurons)
            Wm += (Am - Am * Wm - Wm * Am) / float(neurons)
        beta = sp.triu(Wm)
    elif hparams['learningRule'] == 'proj':
        isingSigns['hz'] *= 0.15
        # Moore-Penrose pseudoinverse rule
        memMat = sp.matrix(memories).T
        beta = sp.triu(memMat * sp.linalg.pinv(memMat))
    sp.fill_diagonal(beta, 0.0)
    # Some outputs
    outputs = {
        'nQubits': nQubits,
        'learningRule': hparams['learningRule'],
        'outdir': probdir,
        'inputState': hparams['inputState'],
        'memories': memories,
        'answer': memories[0],
        'annealTime': list(T) if isinstance(T, collections.Iterable) else T
    }

    ############################################################################
    ######## All variables must be specified here, do NOT change the keys ######
    ############################################################################

    return {
        'nQubits': nQubits,
        'Q': None,
        'T': T,
        'dt': dt,
        'outputdir': outputdir,
        'errchk': errchk,
        'eps': eps,
        'isingConvert': isingConvert,
        'isingSigns': isingSigns,
        'outputs': outputs,
        'alpha': alpha,
        'beta': beta,
        'delta': delta,
        'eigdat': eigspecdat,
        'eigplot': eigspecplot,
        'eignum': eigspecnum,
        'fiddat': fideldat,
        'fidplot': fidelplot,
        'fidnumstates': fidelnumstates,
        'overlapdat': overlapdat,
        'overlapplot': overlapplot,
        'outdir': outputdir,
        'binary': binary,
        'progressout': progressout,
        'probshow': probshow,
        'probout': probout,
        'mingap': mingap,
        'stateoverlap': stateoverlap,
        'hzscale': None,
        'hzzscale': None,
        'hxscale': None,
        'solveMethod': solveMethod
    }