Example #1
def calcMultiStateEnergy(ensemble, ensemble_ref, cutoff=12, k=1, U0=None):
    import prody as pd
    import scipy as sci
    import scipy.spatial as sp
    import numpy as np

    n_conf = ensemble.numConfs()
    n_ref = ensemble_ref.numConfs()
    energies = np.zeros((n_ref, n_conf))

    for i, conf_ref in zip(range(n_ref), ensemble_ref.iterCoordsets()):
        gnm = pd.GNM()
        gnm.buildKirchhoff(conf_ref, cutoff, k)
        kirchhoff = gnm.getKirchhoff()
        np.fill_diagonal(kirchhoff, 0)
        kirchhoff = abs(kirchhoff)
        eq_dists = sp.distance.squareform(sp.distance.pdist(conf_ref, metric="euclidean"))
        for j, conf in zip(range(n_conf), ensemble.iterCoordsets()):
            dists = sp.distance.squareform(sp.distance.pdist(conf, metric="euclidean"))
            springs = (k / 2) * np.multiply(np.square(dists - eq_dists), kirchhoff)
            springs = np.triu(springs)
            energies[i, j] = np.sum(springs)

            # return np.min(energies, axis=0)
    return energies
Example #2
    def __call__(self, A, y, sigma, rng=None):
        m, n = A.shape
        transpose = self.transpose
        if transpose is None:
            # transpose if matrix is fat, but not if sigmas for each neuron
            transpose = m < n and sigma.size == 1

        if transpose:
            # substitution: x = A'*xbar, G*xbar = b where G = A*A' + lambda*I
            G = np.dot(A, A.T)
            b = y
        else:
            # multiplication by A': G*x = A'*b where G = A'*A + lambda*I
            G = np.dot(A.T, A)
            b = np.dot(A.T, y)

        # add L2 regularization term 'lambda' = m * sigma**2
        np.fill_diagonal(G, G.diagonal() + m * sigma**2)

        try:
            import scipy.linalg
            factor = scipy.linalg.cho_factor(G, overwrite_a=True)
            x = scipy.linalg.cho_solve(factor, b)
        except ImportError:
            L = np.linalg.cholesky(G)
            L = np.linalg.inv(L.T)
            x = np.dot(L, np.dot(L.T, b))

        x = np.dot(A.T, x) if transpose else x
        info = {'rmses': rmses(A, x, y)}
        return x, info
Example #3
def test_binary_perplexity_stability():
    # Binary perplexity search should be stable.
    # The binary_search_perplexity had a bug wherein the P array
    # was uninitialized, leading to sporadically failing tests.
    k = 10
    n_samples = 100
    random_state = check_random_state(0)
    distances = random_state.randn(n_samples, 2).astype(np.float32)
    # Distances shouldn't be negative
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    last_P = None
    neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
    for _ in range(100):
        P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
                                      3, verbose=0)
        P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
        # Convert the sparse matrix to a dense one for testing
        P1 = P1.toarray()
        if last_P is None:
            last_P = P
            last_P1 = P1
        else:
            assert_array_almost_equal(P, last_P, decimal=4)
            assert_array_almost_equal(P1, last_P1, decimal=4)
Example #4
def cholesky(A, y, sigma, transpose=None):
    """Solve the least-squares system using the Cholesky decomposition."""
    m, n = A.shape
    if transpose is None:
        # transpose if matrix is fat, but not if we have sigmas for each neuron
        transpose = m < n and sigma.size == 1

    if transpose:
        # substitution: x = A'*xbar, G*xbar = b where G = A*A' + lambda*I
        G = np.dot(A, A.T)
        b = y
    else:
        # multiplication by A': G*x = A'*b where G = A'*A + lambda*I
        G = np.dot(A.T, A)
        b = np.dot(A.T, y)

    # add L2 regularization term 'lambda' = m * sigma**2
    np.fill_diagonal(G, G.diagonal() + m * sigma**2)

    try:
        import scipy.linalg
        factor = scipy.linalg.cho_factor(G, overwrite_a=True)
        x = scipy.linalg.cho_solve(factor, b)
    except ImportError:
        L = np.linalg.cholesky(G)
        L = np.linalg.inv(L.T)
        x = np.dot(L, np.dot(L.T, b))

    x = np.dot(A.T, x) if transpose else x
    info = {'rmses': npext.rms(y - np.dot(A, x), axis=0)}
    return x, info
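A minimal, self-contained sketch of the regularized normal-equations solve that Examples #2 and #4 implement (plain NumPy/SciPy only; it does not call the cholesky() above, so the external npext helper is not needed):

import numpy as np
import scipy.linalg

rng = np.random.RandomState(0)
A = rng.randn(100, 10)   # tall matrix, so the transpose trick is not needed
y = rng.randn(100, 2)
m = A.shape[0]
sigma = 0.1

# G = A'A + m*sigma**2 * I,  b = A'y,  then solve G x = b with a Cholesky factorization
G = np.dot(A.T, A)
np.fill_diagonal(G, G.diagonal() + m * sigma**2)
b = np.dot(A.T, y)
x = scipy.linalg.cho_solve(scipy.linalg.cho_factor(G), b)

# same solution as solving the ridge-regularized normal equations directly
x_ref = np.linalg.solve(np.dot(A.T, A) + m * sigma**2 * np.eye(10), np.dot(A.T, y))
print(np.allclose(x, x_ref))  # expected: True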
Example #5
def test_gradient():
    # Test gradient of Kullback-Leibler divergence.
    random_state = check_random_state(0)

    n_samples = 50
    n_features = 2
    n_components = 2
    alpha = 1.0

    distances = random_state.randn(n_samples, n_features).astype(np.float32)
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)

    P = _joint_probabilities(distances, desired_perplexity=25.0,
                             verbose=0)

    def fun(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[0]

    def grad(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[1]

    assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
                        decimal=5)
Example #6
def build_bucket_correlation(pos_delta, params, margin):
    risk_class = pos_delta.RiskClass.unique()[0]

    g = 0

    if risk_class == 'IR':
        all_curr = pos_delta.Group.unique()
        g = np.ones((len(all_curr), len(all_curr)))

        if not margin == 'Curvature':
            for i in range(len(all_curr)):
                for j in range(len(all_curr)):
                    CRi = pos_delta.iloc[[i]].CR.values[0]
                    CRj = pos_delta.iloc[[j]].CR.values[0]

                    g[i][j] = min(CRi, CRj) / max(CRi, CRj)

        g = g * params.IR_Gamma
    elif risk_class == 'CreditQ':
        g = params.CreditQ_Corr
    elif risk_class == 'CreditNonQ':
        g = params.CreditNonQ_Corr
    elif risk_class == 'Equity':
        g = params.Equity_Corr
    elif risk_class == 'Commodity':
        g = params.Commodity_Corr

    if margin == 'Curvature':
        g = pow(g, 2)

    g = np.mat(g)
    np.fill_diagonal(g, 0)

    return g
Example #7
	def calc_lik_bin(j,L):
		i = recursive[j]
		Q = Q_list[Q_index[i]]
		r_vec=r_vec_list[Q_index[i]]
		t=delta_t[i] 
		r_ind= r_vec_indexes[i]
		sign=  sign_list[i]
		rho_vec= np.prod(abs(sign-r_vec[r_ind]),axis=1)
		# Pt = np.ones((4,4))
		# Pt= linalg.expm(Q.T *(t))
		w, vl = scipy.linalg.eig(Q,left=True, right=False)
		# w = eigenvalues
		# vl = eigenvectors
		vl_inv = np.linalg.inv(vl)
		
		
		d= exp(w*t) 
		m1 = np.zeros((4,4))
		np.fill_diagonal(m1,d)
		Pt1 = np.dot(vl,m1)
		Pt = np.dot(Pt1,vl_inv)
		#print vl, m1, vl_inv
		
		PvDes_temp = L[j,:]
		condLik_temp= np.dot(PvDes_temp,Pt)
		PvDes= condLik_temp *rho_vec
		L[j+1,:]= PvDes
Example #8
 def test_boolean_inputs(self):
     w = np.ones((135, 135), dtype=bool)
     np.fill_diagonal(w, False)
     la = LinearAssignment(w)
     #if the input doesn't get converted to a float, the masking
     #doesn't work properly
     self.assertEqual(la.orig_c.dtype, np.float64)
Example #9
def expm(A, n_factors=None, normalize=False):
    """Simple matrix exponential to replace Scipy's matrix exponential

    This just uses a recursive (factored) version of the Taylor series,
    and is not as good as Scipy (which uses Pade approximants). The hard
    part with this implementation is choosing the length of the Taylor
    series. A longer series is generally needed for a matrix with larger
    eigenvalues, but I'm not exactly sure how these relate. I'm using
    a heuristic based on the matrix norm, since this is kind-of related
    to the size of eigenvalues, though for larger norms the function
    becomes inaccurate no matter the length of the series.

    This function is mostly intended for use in `filter_design`, where
    the matrices should be small, both in dimensions and norm.
    """
    if A.ndim != 2 or A.shape[0] != A.shape[1]:
        raise ValueError("Argument must be a square matrix")

    a = np.linalg.norm(A)
    if normalize:
        a = int(a)
        A = A / float(a)

    if n_factors is None:
        n_factors = 20 if normalize else max(20, int(a))

    Y = np.zeros_like(A)
    for i in range(n_factors, 0, -1):
        Y = np.dot(A / float(i), Y)
        np.fill_diagonal(Y, Y.diagonal() + 1)  # add identity matrix

    return np.linalg.matrix_power(Y, a) if normalize else Y
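A quick sanity check (a sketch, not part of the original example) comparing the Taylor-series expm() above with SciPy's Padé-based implementation on a small-norm matrix, where the heuristic should be accurate:

import numpy as np
import scipy.linalg

rng = np.random.RandomState(0)
A = 0.1 * rng.randn(4, 4)  # small norm on purpose
print(np.allclose(expm(A), scipy.linalg.expm(A)))  # expected: True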
Example #10
def integration(MA, systemByNode):
    '''
    INTEGRATION      Integration coefficient
    I = INTEGRATION(MA,systemByNode) calculates the integration coefficient
    for each node of the network. The integration coefficient of a region
    corresponds to the average probability that this region is in the same
    network community as regions from other systems.
    Inputs:     MA,     Module Allegiance matrix, where element (i,j)
                        represents the probability that nodes i and j
                        belong to the same community
                        systemByNode,	vector or cell array containing the system
                         assignment for each node
    Outputs:    I,              integration coefficient for each node
    _______________________________________________
    Marcelo G Mattar (08/21/2014)
    '''

    # Initialize output

    I = np.zeros(shape=(len(systemByNode), 1), dtype=np.double)

    # Make sure the diagonal of the module allegiance is all nan

    MA = np.double(MA)
    np.fill_diagonal(MA, np.nan)


    # Calculate the integration for each node

    for i in range(systemByNode.size):
        thisSystem = systemByNode[i]
        I[i] = np.nanmean(MA[i,systemByNode!=thisSystem])
    return I
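A small illustrative call, assuming the integration() above is in scope; the 4-node allegiance matrix and the two systems 'A'/'B' below are made up for the illustration:

import numpy as np

MA = np.array([[1.0, 0.8, 0.2, 0.1],
               [0.8, 1.0, 0.3, 0.2],
               [0.2, 0.3, 1.0, 0.7],
               [0.1, 0.2, 0.7, 1.0]])
systemByNode = np.array(['A', 'A', 'B', 'B'])
# each node's mean allegiance to nodes of the *other* system
print(integration(MA, systemByNode).ravel())  # [0.15 0.25 0.25 0.15]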
Example #11
def random_cov(d, diff=None):
    """Generate random covariance matrix.
    
    Generates a random covariance matrix, or two dependent covariance matrices
    if the argument `diff` is given.
    
    """
    S = 0.8*np.random.randn(d,d)
    copy_triu_to_tril(S)
    np.fill_diagonal(S,0)
    mineig = linalg.eigvalsh(S, eigvals=(0,0))[0]
    drand = 0.8*np.random.randn(d)
    if mineig < 0:
        S += np.diag(np.exp(drand)-mineig)
    else:
        S += np.diag(np.exp(drand))
    if not diff:
        return S.T
    S2 = S * np.random.randint(2, size=(d,d))*np.exp(diff*np.random.randn(d,d))
    copy_triu_to_tril(S2)
    np.fill_diagonal(S2,0)
    mineig = linalg.eigvalsh(S2, eigvals=(0,0))[0]
    drand += diff*np.random.randn(d)
    if mineig < 0:
        S2 += np.diag(np.exp(drand)-mineig)
    else:
        S2 += np.diag(np.exp(drand))
    return S.T, S2.T
Example #12
def detect_duplicates(file_name, dist_thr=0.1, FOV=(512, 512)):
    """
    Removes duplicate ROIs from file file_name

    Parameters:
    -----------
        file_name:  .zip file with all rois

        dist_thr:   distance threshold for duplicate detection

        FOV:        dimensions of the FOV

    Returns:
    --------
        duplicates  : list of indices with duplicate entries

        ind_keep    : list of kept indices

    """
    rois = nf_read_roi_zip(file_name, FOV)
    cm = [scipy.ndimage.center_of_mass(mm) for mm in rois]
    sp_rois = scipy.sparse.csc_matrix(
        np.reshape(rois, (rois.shape[0], np.prod(FOV))).T)
    D = distance_masks([sp_rois, sp_rois], [cm, cm], 10)[0]
    np.fill_diagonal(D, 1)
    indices = np.where(D < dist_thr)      # pairs of duplicate indices

    ind = list(np.unique(indices[1][indices[1] > indices[0]]))
    ind_keep = list(set(range(D.shape[0])) - set(ind))
    duplicates = list(np.unique(np.concatenate((indices[0], indices[1]))))

    return duplicates, ind_keep
Example #13
def recruitment(MA, systemByNode):
    '''
    RECRUITMENT      Recruitment coefficient
    R = RECRUITMENT(MA,systemByNode) calculates the recruitment coefficient
    for each node of the network. The recruitment coefficient of a region
    corresponds to the average probability that this region is in the same
    network community as other regions from its own system.
    Inputs:     MA,     Module Allegiance matrix, where element (i,j)
                       represents the probability that nodes i and j
                       belong to the same community
               systemByNode,	vector or cell array containing the system
                       assignment for each node
    Outputs:    R,              recruitment coefficient for each node
    _______________________________________________
    Marcelo G Mattar (08/21/2014)
    '''

    # Initialize output

    R = np.zeros(shape=(systemByNode.size, 1), dtype=np.double)


    # Make sure the diagonal of the module allegiance is all nan

    MA = np.double(MA)
    np.fill_diagonal(MA, np.nan)

    # Calculate the recruitment for each node

    for i in range(systemByNode.size):
        thisSystem = systemByNode[i]
        R[i] = np.nanmean(MA[i,systemByNode==thisSystem])
    return R
Example #14
def condense_matrix(matrice, smallest_index, method='upgma'):
    """Matrice condensation in the next iteration

    Smallest index is returned from find_smallest_index.
    For both leaf at i and j a new distance is calculated from the average of the corresponding
    row or the corresponding columns
    We then replace the first index (row and column) by the average vector obtained
    and the second index by an array with large numbers so that
    it is never chosen again with find_smallest_index.
    Now the new regroupement distance value is at the first position! (on row and column)
    """
    first_index, second_index = smallest_index
    # get the rows and make a new vector by updating distance
    rows = np.take(matrice, smallest_index, 1)
    # default we use upgma
    if(method.lower() == 'nj'):
        new_vector = (
            np.sum(rows, 1) - matrice[first_index, second_index]) * 0.5

    else:
        new_vector = np.average(rows, 1)

    # replace the row and column at second_index with the new vector
    matrice[second_index] = new_vector
    matrice[:, second_index] = new_vector
    np.fill_diagonal(matrice, 0)
    # drop the row and column at first_index so that this entry is not
    # considered again
    return remove_ij(matrice, first_index, first_index)
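A usage sketch for condense_matrix(). The remove_ij() helper is not shown in this example, so the stand-in below (delete one row and one column) is only an assumption made for illustration:

import numpy as np

def remove_ij(matrice, i, j):  # hypothetical stand-in for the missing helper
    return np.delete(np.delete(matrice, i, axis=0), j, axis=1)

d = np.array([[0., 2., 4.],
              [2., 0., 6.],
              [4., 6., 0.]])
# merge the closest pair (0, 1); their average distance to leaf 2 is (4 + 6) / 2 = 5
print(condense_matrix(d, (0, 1), method='upgma'))  # [[0. 5.] [5. 0.]]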
Example #15
    def resample_mu_and_Sig(self, A, W):
        """
        Resample p given observations of the weights
        """
        Abool = A.astype(bool)

        for c1 in range(self.C):
            for c2 in range(self.C):
                mask = self._get_mask(Abool, c1, c2)
                self._gaussians[c1][c2].resample(W[mask])

        # Resample self connection
        if self.special_case_self_conns:
            mask = np.eye(self.N, dtype=bool) & Abool
            self._self_gaussian.resample(W[mask])

        # Resample covariance
        A_offdiag = Abool.copy()
        np.fill_diagonal(A_offdiag, False)
        W_cent = (W - self.Mu)[A_offdiag]
        self._cov_model.resample(W_cent)

        # Update gaussians
        for c1 in range(self.C):
            for c2 in range(self.C):
                self._gaussians[c1][c2].sigma = self._cov_model.sigma
Example #16
def T_classic(data, sigma=1000.):
    ''' Implementation of diffusion transition matrix calculation
    from Haghverdi et al http://biorxiv.org/content/early/2016/03/02/041384
    '''
    n, G = data.shape
    d2 = np.zeros((n, n))

    for g in range(G):
        d2_g = np.subtract.outer(data[:, g], data[:, g]) ** 2
        d2 += d2_g

    W = np.exp(-d2 / (2. * sigma ** 2))

    D = W.sum(0)

    q = np.multiply.outer(D, D)
    np.fill_diagonal(W, 0.)

    H = W / q

    D_ = np.diag(H.sum(1))
    D_inv = np.diag(1. / np.diag(D_))

    T = D_inv.dot(H)

    phi0 = np.diag(D_) / np.diag(D_).sum()
    
    return T, phi0
Example #17
def Gaussian_elim_pp(A):
    '''
    Perform LU factorization by Gaussian elimination with partial pivoting (pp).
    A is unchanged in the process.
    Uses the LU = PA convention, so L is a lower triangular matrix. Returns P, L
    and U. In the process L also needs to be permuted.
    '''
    n = A.shape[0]
    L = np.zeros((n, n))  # diagonal left at 0 because of the row permutations; set to 1 at the end
    P = np.identity(n)
    U = np.copy(A)
    if n == 1:
        print('Cannot be used for a scalar')
        return     
    for k in range(n - 1):
        # The original index should be +k
        max_idex = np.argmax(np.abs(U[:,k][k:])) + k
        if max_idex != k:
            row_index = np.arange(n)
            row_index[max_idex], row_index[k] = k, max_idex
            U = U[row_index,:]  # Permutate U for partial pivoting
            L = L[row_index,:]  # Permutate L too
            P = P[row_index,:]  # Form the total permutation matrix          
        if U[k,k] == 0:
            print('ERROR: pivot is zero')
            continue
        for i in range(k + 1, n):
            L[i,k] =  U[i,k] / U[k,k]
            U[i,k] = 0
        for j in range(k + 1, n):
            for i in range(k + 1, n):
                U[i,j] = U[i,j] - L[i,k] * U[k,j]
    np.fill_diagonal(L,1)       # Fill the diagonal of L in the end 
    return P, L, U
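A quick check of the factorization above (a sketch; assumes the Gaussian_elim_pp() defined here is in scope):

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(5, 5)
P, L, U = Gaussian_elim_pp(A)
print(np.allclose(P.dot(A), L.dot(U)))  # expected: True (LU = PA)
print(np.allclose(U, np.triu(U)))       # U is upper triangular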
Example #18
def load_vocabulary():
    """
    Unpickle and return the content of the file
    `free_associations_vocabulary` generated by `process_data.py`.

    Output
    ------
        W:          association matrix
        id2voc:     dictionary, keys are word ids and values words
        voc2id:     dictionary, keys are words and values word ids
    """

    # absolute path to the free association norms data
    path = '../../data/associationmatrices/'
    filename = 'association_norms_symm'

    try:
        with open(path+filename, 'rb') as f:
            id2voc = pickle.load(f)
            voc2id = pickle.load(f)
            Wsparse = pickle.load(f)
    except IOError:
        raise IOError('Association matrix "' + filename + '" not found' +
                      ' in ' + path + '. To generate the matrix run ' +
                      'generate_association_matrix.py')

    # convert to dense matrix (stored as sparse for memory reasons)
    W = np.asarray(Wsparse.todense())

    # normalize weights to [0-1] interval
    W /= 2
    np.fill_diagonal(W, 1.)

    return W, id2voc, voc2id
Example #19
  def fit(self, X, y):
    """
    X: data matrix, (n x d)
    y: scalar labels, (n)
    """
    X, labels = check_X_y(X, y)
    n, d = X.shape
    num_dims = self.num_dims
    if num_dims is None:
        num_dims = d
    # Initialize A to a scaling matrix
    A = np.zeros((num_dims, d))
    np.fill_diagonal(A, 1./(np.maximum(X.max(axis=0)-X.min(axis=0), EPS)))

    # Run NCA
    dX = X[:,None] - X[None]  # shape (n, n, d)
    tmp = np.einsum('...i,...j->...ij', dX, dX)  # shape (n, n, d, d)
    masks = labels[:,None] == labels[None]
    for it in range(self.max_iter):
      for i, label in enumerate(labels):
        mask = masks[i]
        Ax = A.dot(X.T).T  # shape (n, num_dims)

        softmax = np.exp(-((Ax[i] - Ax)**2).sum(axis=1))  # shape (n)
        softmax[i] = 0
        softmax /= softmax.sum()

        t = softmax[:, None, None] * tmp[i]  # shape (n, d, d)
        d = softmax[mask].sum() * t.sum(axis=0) - t[mask].sum(axis=0)
        A += self.learning_rate * A.dot(d)

    self.X_ = X
    self.A_ = A
    self.n_iter_ = it
    return self
Example #20
def calculate_couplings_levine(dt: float, w_jk: Matrix,
                               w_kj: Matrix) -> Matrix:
    """
    Compute the non-adiabatic coupling according to:
    `Evaluation of the Time-Derivative Coupling for Accurate Electronic
    State Transition Probabilities from Numerical Simulations`.
    Garrett A. Meek and Benjamin G. Levine.
    dx.doi.org/10.1021/jz5009449 | J. Phys. Chem. Lett. 2014, 5, 2351−2356
    """
    # Orthonormalize the Overlap matrices
    w_jk = np.linalg.qr(w_jk)[0]
    w_kj = np.linalg.qr(w_kj)[0]

    # Diagonal matrix
    w_jj = np.diag(np.diag(w_jk))
    w_kk = np.diag(np.diag(w_kj))

    # remove the values from the diagonal
    np.fill_diagonal(w_jk, 0)
    np.fill_diagonal(w_kj, 0)

    # Components A + B
    acos_w_jj = np.arccos(w_jj)
    asin_w_jk = np.arcsin(w_jk)

    a = acos_w_jj - asin_w_jk
    b = acos_w_jj + asin_w_jk
    A = - np.sin(np.sinc(a))
    B = np.sin(np.sinc(b))

    # Components C + D
    acos_w_kk = np.arccos(w_kk)
    asin_w_kj = np.arcsin(w_kj)

    c = acos_w_kk - asin_w_kj
    d = acos_w_kk + asin_w_kj
    C = np.sin(np.sinc(c))
    D = np.sin(np.sinc(d))

    # Components E
    w_lj = np.sqrt(1 - (w_jj ** 2) - (w_kj ** 2))
    w_lk = -(w_jk * w_jj + w_kk * w_kj) / w_lj
    asin_w_lj = np.arcsin(w_lj)
    asin_w_lk = np.arcsin(w_lk)
    asin_w_lj2 = asin_w_lj ** 2
    asin_w_lk2 = asin_w_lk ** 2

    t1 = w_lj * w_lk * asin_w_lj
    x1 = np.sqrt((1 - w_lj ** 2) * (1 - w_lk ** 2)) - 1
    t2 = x1 * asin_w_lk
    t = t1 + t2
    E_nonzero = 2 * asin_w_lj * t / (asin_w_lj2 - asin_w_lk2)

    # Check whether w_lj is different of zero
    E1 = np.where(np.abs(w_lj) > 1e-8, E_nonzero, np.zeros(A.shape))

    E = np.where(np.isclose(asin_w_lj2, asin_w_lk2), w_lj ** 2, E1)

    cte = 1 / (2 * dt)
    return cte * (np.arccos(w_jj) * (A + B) + np.arcsin(w_kj) * (C + D) + E)
Example #21
def test_get_vox_dims():
    # setup
    affine = np.eye(4)
    np.fill_diagonal(affine, [-3, 3, 3])

    output_dir = os.path.join(OUTPUT_DIR, inspect.stack()[0][3])
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # 3D vol
    vol = create_random_image(affine=affine)
    np.testing.assert_array_equal(get_vox_dims(vol), [3, 3, 3])

    # 3D image file
    saved_img_filename = os.path.join(output_dir, "vol.nii.gz")
    nibabel.save(vol, saved_img_filename)
    np.testing.assert_array_equal(get_vox_dims(vol), [3, 3, 3])

    # 4D niimg
    film = create_random_image(n_scans=10, affine=affine)
    np.testing.assert_array_equal(get_vox_dims(film), [3, 3, 3])

    # 4D image file
    film = create_random_image(n_scans=10, affine=affine)
    saved_img_filename = os.path.join(output_dir, "4D.nii.gz")
    nibabel.save(film, saved_img_filename)
    np.testing.assert_array_equal(get_vox_dims(film), [3, 3, 3])
Example #22
    def add_batch(self, X, T, wc=None):
        """Add a batch of training data to an iterative solution, weighted if neeed.

        The batch is processed as a whole, the training data is splitted in `ELM.add_data()` method.
        With parameters HH_out, HT_out, the output will be put into these matrices instead of model.

        Args:
            X (matrix): input data matrix size (N * `inputs`)
            T (matrix): output data matrix size (N * `outputs`)
            wc (vector): vector of weights for data samples, one weight per sample, size (N * 1)
            HH_out, HT_out (matrix, optional): output matrices to add batch result into, always given together
        """
        H = self._project(X)
        T = T.astype(self.precision)
        if wc is not None:  # apply weights if given
            w = np.array(wc**0.5, dtype=self.precision)[:, None]  # re-shape to column matrix
            H *= w
            T *= w

        if self.HH is None:  # initialize space for self.HH, self.HT
            self.HH = np.zeros((self.L, self.L), dtype=self.precision)
            self.HT = np.zeros((self.L, self.outputs), dtype=self.precision)
            np.fill_diagonal(self.HH, self.norm)

        self.HH += np.dot(H.T, H)
        self.HT += np.dot(H.T, T)
Example #23
    def _get_abs_corr_mat(self, X_filled, tolerance=1e-6):
        """Get absolute correlation matrix between features.

        Parameters
        ----------
        X_filled : ndarray, shape (n_samples, n_features)
            Input data with the most recent imputations.

        tolerance : float, optional (default=1e-6)
            ``abs_corr_mat`` can have nans, which will be replaced
            with ``tolerance``.

        Returns
        -------
        abs_corr_mat : ndarray, shape (n_features, n_features)
            Absolute correlation matrix of ``X`` at the beginning of the
            current round. The diagonal has been zeroed out and each feature's
            absolute correlations with all others have been normalized to sum
            to 1.
        """
        n_features = X_filled.shape[1]
        if (self.n_nearest_features is None or
                self.n_nearest_features >= n_features):
            return None
        abs_corr_mat = np.abs(np.corrcoef(X_filled.T))
        # np.corrcoef is not defined for features with zero std
        abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance
        # ensures exploration, i.e. at least some probability of sampling
        np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)
        # features are not their own neighbors
        np.fill_diagonal(abs_corr_mat, 0)
        # needs to sum to 1 for np.random.choice sampling
        abs_corr_mat = normalize(abs_corr_mat, norm='l1', axis=0, copy=False)
        return abs_corr_mat
Example #24
def knn_initialize(
        X,
        missing_mask,
        verbose=False,
        min_dist=1e-6,
        max_dist_multiplier=1e6):
    """
    Fill X with NaN values if necessary, construct the n_samples x n_samples
    distance matrix and set the self-distance of each row to infinity.

    Returns contents of X laid out in row-major, the distance matrix,
    and an "effective infinity" which is larger than any entry of the
    distance matrix.
    """
    X_row_major = X.copy("C")
    if missing_mask.sum() != np.isnan(X_row_major).sum():
        # if the missing values have already been zero-filled need
        # to put NaN's back in the data matrix for the distances function
        X_row_major[missing_mask] = np.nan
    D = all_pairs_normalized_distances(X_row_major)
    D_finite_flat = D[np.isfinite(D)]
    if len(D_finite_flat) > 0:
        max_dist = max_dist_multiplier * max(1, D_finite_flat.max())
    else:
        max_dist = max_dist_multiplier
    # set diagonal of distance matrix to a large value since we don't want
    # points considering themselves as neighbors
    np.fill_diagonal(D, max_dist)
    D[D < min_dist] = min_dist  # prevents 0s
    D[D > max_dist] = max_dist  # prevents infinities
    return X_row_major, D, max_dist
Example #25
def test_mean():
    """tests various ways to compute mean - collapsing different combination of axes"""
    data = np.arange(100).reshape(10,10)
    ts_1 = TimeSeriesX.create(data, None, dims=['x','y'], coords={'x':np.arange(10)*2,
                                                                'y':np.arange(10),
                                                                    'samplerate': 1})
    grand_mean = ts_1.mean()

    assert grand_mean == 49.5

    x_mean  = ts_1.mean(dim='x')
    assert (x_mean == np.arange(45, 55, 1, dtype=float)).all()
    # checking axes
    assert(ts_1.y == x_mean.y).all()

    y_mean = ts_1.mean(dim='y')
    assert (y_mean == np.arange(4.5, 95, 10, dtype=float)).all()
    # checking axes
    assert (y_mean.x == ts_1.x).all()

    # test mean NaN
    data_2 = np.arange(100, dtype=float).reshape(10, 10)
    np.fill_diagonal(data_2, np.nan)
    # data_2[9,9] = 99


    ts_2 = TimeSeriesX.create(data_2, None, dims=['x','y'], coords={'x':np.arange(10)*2,
                                                                'y':np.arange(10),
                                                                    'samplerate': 1})

    grand_mean = ts_2.mean(skipna=True)
    assert grand_mean == 49.5
Example #26
def question_1():
    # Adjacency matrix.
    A = numpy.matrix([
        [0, 0, 1, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 1, 0, 0, 1],
        [1, 0, 0, 1, 0, 1, 0, 0],
        [0, 0, 1, 0, 1, 0, 1, 0],
        [0, 1, 0, 1, 0, 0, 0, 1],
        [1, 0, 1, 0, 0, 0, 1, 0],
        [0, 0, 0, 1, 0, 1, 0, 1],
        [0, 1, 0, 0, 1, 0, 1, 0]
    ])
    rn, cn = A.shape

    # Degree matrix.
    D = numpy.asmatrix(numpy.zeros((rn, cn), int))
    numpy.fill_diagonal(D, sum(A))

    # Laplacian matrix.
    L = D - A

    sum_a = A.sum()
    sum_d = D.sum()
    sum_l = L.sum()
    nonzero_a = numpy.count_nonzero(A)
    nonzero_d = numpy.count_nonzero(D)
    nonzero_l = numpy.count_nonzero(L)

    print('A: sum={} #nonzero={}'.format(sum_a, nonzero_a))
    print('D: sum={} #nonzero={}'.format(sum_d, nonzero_d))
    print('L: sum={} #nonzero={}'.format(sum_l, nonzero_l))
Example #27
    def build_kernels(self):
        """ Build the synaptic connectivity matrices """
        n = self.n
        # Compute all the possible distances
        dist = [self.build_distances(n, 0.917, 0.0, 1.0),
                self.build_distances(n, 0.083, 0.0, 1.0),
                self.build_distances(n, 0.912, 0.83, 1.0)]

        # Create a temporary vector containing gaussians
        g = np.empty((len(self.K), n, n))
        for j in range(len(self.K)):
            for i in range(n):
                # g[j, i] = self.K[j] * self.gaussian(dist[i], self.S[j])
                g[j, i] = self.K[j] * self.g(dist[j][i], self.S[j])
            g[j, self.m:self.k] = 0.0

        # GPe to STN connections
        W12 = np.zeros((n, n))
        W12[:self.m, self.k:] = g[0, self.k:, self.k:]

        # STN to GPe connections
        W21 = np.zeros((n, n))
        W21[self.k:, :self.m] = g[1, :self.m, :self.m]

        # GPe to GPe connections
        W22 = np.zeros((n, n))
        W22[self.k:, self.k:] = g[2, self.k:, self.k:]
        np.fill_diagonal(W22, 0.0)

        return W12, W21, W22, dist
Example #28
def test_wcs_dropping():
    wcs = WCS(naxis=4)
    wcs.wcs.pc = np.zeros([4, 4])
    np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
    pc = wcs.wcs.pc  # for later use below

    dropped = wcs.dropaxis(0)
    assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))
    dropped = wcs.dropaxis(1)
    assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))
    dropped = wcs.dropaxis(2)
    assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))
    dropped = wcs.dropaxis(3)
    assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))

    wcs = WCS(naxis=4)
    wcs.wcs.cd = pc

    dropped = wcs.dropaxis(0)
    assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))
    dropped = wcs.dropaxis(1)
    assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))
    dropped = wcs.dropaxis(2)
    assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))
    dropped = wcs.dropaxis(3)
    assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))
Example #29
 def __call__(self, A, Y, rng=None, E=None):
     # form Gram matrix so we can add regularization
     sigma = self.reg * A.max()
     G = np.dot(A.T, A)
     Y = np.dot(A.T, Y)
     np.fill_diagonal(G, G.diagonal() + sigma)
     return super(NnlsL2, self).__call__(G, Y, rng=rng, E=E)
Example #30
  def testBijector(self):
    x = np.float32(np.random.randn(3, 4, 4))

    y = x.copy()
    for i in range(x.shape[0]):
      np.fill_diagonal(y[i, :, :], np.exp(np.diag(x[i, :, :])))

    exp = tfb.Exp()
    b = tfb.TransformDiagonal(diag_bijector=exp)

    y_ = self.evaluate(b.forward(x))
    self.assertAllClose(y, y_)

    x_ = self.evaluate(b.inverse(y))
    self.assertAllClose(x, x_)

    fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=2))
    ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
    self.assertAllEqual(
        fldj,
        self.evaluate(exp.forward_log_det_jacobian(
            np.array([np.diag(x_mat) for x_mat in x]),
            event_ndims=1)))
    self.assertAllEqual(
        ildj,
        self.evaluate(exp.inverse_log_det_jacobian(
            np.array([np.diag(y_mat) for y_mat in y]),
            event_ndims=1)))
Example #31
    def _srm(self, data):
        """Expectation-Maximization algorithm for fitting the probabilistic SRM.

        Parameters
        ----------

        data : list of 2D arrays, element i has shape=[voxels_i, samples]
            Each element in the list contains the fMRI data of one subject.

        Returns
        -------

        w : list of array, element i has shape=[voxels_i, features]
            The orthogonal transforms (mappings) :math:`W_i` for each subject.

        s : array, shape=[features, samples]
            The shared response.
        """

        subjects = len(data)

        self.random_state_ = np.random.RandomState(self.rand_seed)
        random_states = [
            np.random.RandomState(
                self.random_state_.randint(2 ** 32 - 1, dtype=np.int64)
            )
            for i in range(len(data))
        ]

        # Initialization step: initialize the outputs with initial values,
        # voxels with the number of voxels in each subject.
        w, _ = _init_w_transforms(data, self.features, random_states)
        shared_response = self._compute_shared_response(data, w)
        if logger.isEnabledFor(logging.INFO):
            # Calculate the current objective function value
            objective = self._objective_function(data, w, shared_response)
            logger.info("Objective function %f" % objective)

        # Main loop of the algorithm
        for iteration in range(self.n_iter):
            logger.info("Iteration %d" % (iteration + 1))

            # Update each subject's mapping transform W_i:
            for subject in range(subjects):
                a_subject = data[subject].dot(shared_response.T)
                perturbation = np.zeros(a_subject.shape)
                np.fill_diagonal(perturbation, 0.001)
                u_subject, _, v_subject = np.linalg.svd(
                    a_subject + perturbation, full_matrices=False
                )
                w[subject] = u_subject.dot(v_subject)

            # Update the shared response:
            shared_response = self._compute_shared_response(data, w)

            if logger.isEnabledFor(logging.INFO):
                # Calculate the current objective function value
                objective = self._objective_function(data, w, shared_response)
                logger.info("Objective function %f" % objective)

        return w, shared_response
Example #32
    def estimate_shift2D(self,
                         reference='current',
                         correlation_threshold=None,
                         chunk_size=30,
                         roi=None,
                         normalize_corr=False,
                         sobel=True,
                         medfilter=True,
                         hanning=True,
                         plot=False,
                         dtype='float',
                         show_progressbar=None,
                         sub_pixel_factor=1):
        """Estimate the shifts in a image using phase correlation

        This method can only estimate the shift by comparing
        bidimensional features that should not change position
        between frames. To decrease the memory usage, the time of
        computation and the accuracy of the results it is convenient
        to select a region of interest by setting the roi keyword.

        Parameters
        ----------
        reference : {'current', 'cascade' ,'stat'}
            If 'current' (default) the image at the current
            coordinates is taken as reference. If 'cascade' each image
            is aligned with the previous one. If 'stat' the translation
            of every image relative to all the others is estimated, and the
            final translation is obtained by statistical analysis of the
            result.
        correlation_threshold : {None, 'auto', float}
            This parameter is only relevant when reference='stat'.
            If float, the shift estimations with a maximum correlation
            value lower than the given value are not used to compute
            the estimated shifts. If 'auto' the threshold is calculated
            automatically as the minimum maximum correlation value
            of the automatically selected reference image.
        chunk_size : {None, int}
            If int and reference='stat' the number of images used
            as reference are limited to the given value.
        roi : tuple of ints or floats (left, right, top, bottom)
            Define the region of interest. If int (float), the position
            is given as the axis index (value). Note that ROIs can be used
            in place of a tuple.
        sobel : bool
            apply a sobel filter for edge enhancement
        medfilter :  bool
            apply a median filter for noise reduction
        hanning : bool
            Apply a 2d hanning filter
        plot : bool or 'reuse'
            If True plots the images after applying the filters and
            the phase correlation. If 'reuse', it will also plot the images,
            but it will only use one figure, and continuously update the images
            in that figure as it progresses through the stack.
        dtype : str or dtype
            Typecode or data-type in which the calculations must be
            performed.
        %s
        sub_pixel_factor : float
            Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor
            parts of a pixel. Default is 1, i.e. no sub-pixel accuracy.

        Returns
        -------
        shifts : list of array
            List of estimated shifts

        Notes
        -----
        The statistical analysis approach to the translation estimation
        when using reference='stat' roughly follows [*]_ . If you use
        it please cite their article.

        References
        ----------
        .. [*] Schaffer, Bernhard, Werner Grogger, and Gerald Kothleitner.
           “Automated Spatial Drift Correction for EFTEM Image Series.”
           Ultramicroscopy 102, no. 1 (December 2004): 27–36.

        """
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_two()
        if roi is not None:
            # Get the indices of the roi
            yaxis = self.axes_manager.signal_axes[1]
            xaxis = self.axes_manager.signal_axes[0]
            roi = tuple([xaxis._get_index(i) for i in roi[2:]] +
                        [yaxis._get_index(i) for i in roi[:2]])

        ref = None if reference == 'cascade' else \
            self.__call__().copy()
        shifts = []
        nrows = None
        images_number = self.axes_manager._max_index + 1
        if plot == 'reuse':
            # Reuse figure for plots
            plot = plt.figure()
        if reference == 'stat':
            nrows = images_number if chunk_size is None else \
                min(images_number, chunk_size)
            pcarray = ma.zeros(
                (nrows, self.axes_manager._max_index + 1),
                dtype=np.dtype([('max_value', float),
                                ('shift', np.int32, (2, ))]))
            nshift, max_value = estimate_image_shift(
                self(),
                self(),
                roi=roi,
                sobel=sobel,
                medfilter=medfilter,
                hanning=hanning,
                normalize_corr=normalize_corr,
                plot=plot,
                dtype=dtype,
                sub_pixel_factor=sub_pixel_factor)
            np.fill_diagonal(pcarray['max_value'], max_value)
            pbar_max = nrows * images_number
        else:
            pbar_max = images_number

        # Main iteration loop. Fills the rows of pcarray when reference
        # is stat
        with progressbar(total=pbar_max,
                         disable=not show_progressbar,
                         leave=True) as pbar:
            for i1, im in enumerate(self._iterate_signal()):
                if reference in ['current', 'cascade']:
                    if ref is None:
                        ref = im.copy()
                        shift = np.array([0, 0])
                    nshift, max_val = estimate_image_shift(
                        ref,
                        im,
                        roi=roi,
                        sobel=sobel,
                        medfilter=medfilter,
                        hanning=hanning,
                        plot=plot,
                        normalize_corr=normalize_corr,
                        dtype=dtype,
                        sub_pixel_factor=sub_pixel_factor)
                    if reference == 'cascade':
                        shift += nshift
                        ref = im.copy()
                    else:
                        shift = nshift
                    shifts.append(shift.copy())
                    pbar.update(1)
                elif reference == 'stat':
                    if i1 == nrows:
                        break
                    # Iterate to fill the columns of pcarray
                    for i2, im2 in enumerate(self._iterate_signal()):
                        if i2 > i1:
                            nshift, max_value = estimate_image_shift(
                                im,
                                im2,
                                roi=roi,
                                sobel=sobel,
                                medfilter=medfilter,
                                hanning=hanning,
                                normalize_corr=normalize_corr,
                                plot=plot,
                                dtype=dtype,
                                sub_pixel_factor=sub_pixel_factor)
                            pcarray[i1, i2] = max_value, nshift
                        del im2
                        pbar.update(1)
                    del im
        if reference == 'stat':
            # Select the reference image as the one that has the
            # higher max_value in the row
            sqpcarr = pcarray[:, :nrows]
            sqpcarr['max_value'][:] = symmetrize(sqpcarr['max_value'])
            sqpcarr['shift'][:] = antisymmetrize(sqpcarr['shift'])
            ref_index = np.argmax(pcarray['max_value'].min(1))
            self.ref_index = ref_index
            shifts = (pcarray['shift'] +
                      pcarray['shift'][ref_index, :nrows][:, np.newaxis])
            if correlation_threshold is not None:
                if correlation_threshold == 'auto':
                    correlation_threshold = \
                        (pcarray['max_value'].min(0)).max()
                    _logger.info("Correlation threshold = %1.2f",
                                 correlation_threshold)
                shifts[
                    pcarray['max_value'] < correlation_threshold] = ma.masked
                shifts.mask[ref_index, :] = False

            shifts = shifts.mean(0)
        else:
            shifts = np.array(shifts)
            del ref
        return shifts
Example #33
    R = np.zeros((31, 31))
    for i in range(31):
        for j in range(31):
            # diagonal element
            if j == i:
                # k odd
                if k[i] % 2 != 0:
                    R[i][j] = 1.0
                # k even
                else:
                    R[i][j] = 4.0
            # off-diagonal element
            else:
                R[i][j] = 0

    # initial value of the estimate x_hat (3x1)
    x_hat_init = np.zeros(3)

    # initial value of the estimation-error covariance matrix P (3x3)
    Pk_init = np.zeros((3, 3))
    # set the diagonal to an initial value of 10^6 (changing it changes the estimation accuracy)
    np.fill_diagonal(Pk_init, 1000000)

    # initial value of the plant-noise covariance matrix Q (3x3)
    Qk_init = np.zeros((3, 3))
    # set the diagonal initial value (changing it changes the estimation accuracy)
    np.fill_diagonal(Qk_init, 0)

    print('Start Kalman Filtering')
    KalmanFiltering(x_hat_init, Pk_init, Z, A, R)
Example #34
print(
    cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3,
                    scoring="accuracy"))

# error analysis
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train, cv=3)
conf_mx = confusion_matrix(y_train, y_train_pred)
print(conf_mx)

plt.matshow(conf_mx, cmap=plt.cm.gray)
plt.show()

row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums

np.fill_diagonal(norm_conf_mx, 0)
plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
plt.show()

y_train_large = (y_train >= 7)
y_train_odd = (y_train % 2 == 1)
y_multilabel = np.c_[y_train_large, y_train_odd]

knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train, y_multilabel)

print(knn_clf.predict([some_digit]))

y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3)
print(f1_score(y_multilabel, y_train_knn_pred, average="macro"))
Example #35
                           (idx + 1)] = NodeAttribute(ts_)
            npy_adjacency_matrix[:all_node_num, (all_node_num + n_expanded) *
                                 idx:(all_node_num + n_expanded) * idx +
                                 all_node_num] = get_adjacency_matrix(
                                     ts_, L, 'all')

        # t+1
        node_attribute[:pred_attribute[c_idx][ts].shape[0],
                       attribute_dim * (L - 1):] = pred_attribute[c_idx][ts]
        npy_adjacency_matrix[:, (all_node_num + n_expanded) *
                             (L - 1):] = pred_adjacency_matrix[ts][c_idx]

        lil_adjacency_matrix = lil_matrix(npy_adjacency_matrix)
        lil_node_attribute = lil_matrix(node_attribute)
        mmwrite(OutputDir[c_idx] + "/input/node_attribute/" + str(ts),
                lil_node_attribute)
        mmwrite(OutputDir[c_idx] + "/input/adjacency/" + str(ts),
                lil_adjacency_matrix)

        label = np.zeros(
            (all_node_num + n_expanded, all_node_num + n_expanded))
        label[:all_node_num, :all_node_num] = get_adjacency_matrix(
            ts_test, L, "disappeared")
        mmwrite(OutputDir[c_idx] + "/label/" + str(ts), lil_matrix(label))

        exist_matrix = get_exist_matrix(ts_train[-1])
        np.fill_diagonal(exist_matrix, 0)
        mask = np.zeros((all_node_num + n_expanded, all_node_num + n_expanded))
        mask[:all_node_num, :all_node_num] = get_adjacency_matrix(
            ts_train[-1], L, 'all')
        mmwrite(OutputDir[c_idx] + "/mask/" + str(ts), lil_matrix(mask))
Example #36
def InitializeFlatGraph(n,
                        min_weight,
                        max_weight,
                        dropout=0,
                        seed=-1,
                        graph_type="complete",
                        intercalation=None,
                        modulo=None):
    if graph_type == "complete":
        if seed == -1:
            W = np.random.uniform(low=min_weight, high=max_weight, size=n)
        else:
            local_state = np.random.RandomState(seed)
            W = local_state.uniform(low=min_weight, high=max_weight, size=n)
        if dropout > 0:
            connected = False
            while not connected:
                if seed == -1:
                    D = np.random.choice([0, 1],
                                         size=n,
                                         replace=True,
                                         p=[dropout, 1 - dropout])
                else:
                    D = local_state.choice([0, 1],
                                           size=n,
                                           replace=True,
                                           p=[dropout, 1 - dropout])
                # Check if the dropped out matrix is connected
                # Make it a symmetric matrix
                D_sym = SymmetricMatrix(D)
                # Check if it resembles a connected graph
                D_sym = nx.from_numpy_matrix(D_sym)
                if nx.is_connected(D_sym):
                    connected = True
            # Once we have a connected graph, add weights
            W = np.multiply(W, D)
    elif graph_type == "cycle":
        if seed == -1:
            weights = np.random.uniform(low=min_weight,
                                        high=max_weight,
                                        size=n)
        else:
            local_state = np.random.RandomState(seed)
            weights = local_state.uniform(low=min_weight,
                                          high=max_weight,
                                          size=n)
        W = np.zeros((n, n))
        i = 0
        j = 1
        for k in range(n - 1):
            W[i, j] = weights[k]
            i += 1
            j += 1
        # Add last weight
        W[0, n - 1] = weights[n - 1]
        i_lower = np.tril_indices(n, -1)
        W[i_lower] = W.T[i_lower]
        # Make sure diagonal is empty
        np.fill_diagonal(W, 0)
        W = np.copy(W[np.triu_indices_from(W, k=1)])
    elif graph_type == "regular_log(n)":
        degseq = np.repeat(np.floor(np.log(n)), n)
        adjMatr = AdjacencyMatrixDegree(degseq)
        if seed == -1:
            W = np.random.uniform(low=min_weight, high=max_weight, size=n)
        else:
            local_state = np.random.RandomState(seed)
            W = local_state.uniform(low=min_weight, high=max_weight, size=n)
        W = np.multiply(W, adjMatr)
        W = np.copy(W[np.triu_indices_from(W, k=1)])
    elif graph_type == "intercalate":
        W = InitializeGraph(n, min_weight, max_weight, dropout, seed,
                            graph_type, intercalation)
        W = np.copy(W[np.triu_indices_from(W, k=1)])
    elif graph_type == "modulo":
        W = InitializeGraph(n, min_weight, max_weight, dropout, seed,
                            graph_type, None, modulo)
        W = np.copy(W[np.triu_indices_from(W, k=1)])
    return W
Example #37
def InitializeGraph(n,
                    min_weight,
                    max_weight,
                    dropout,
                    seed=-1,
                    graph_type="complete",
                    intercalation=None,
                    modulo=None):
    # Randomness
    if graph_type == "complete":
        if seed == -1:
            W = np.random.uniform(low=min_weight, high=max_weight, size=(n, n))
        else:
            local_state = np.random.RandomState(seed)
            W = local_state.uniform(low=min_weight,
                                    high=max_weight,
                                    size=(n, n))
        # Dropout with connected
        if dropout > 0:
            connected = False
            while not connected:
                if seed == -1:
                    D = np.random.choice([0, 1],
                                         size=(n, n),
                                         replace=True,
                                         p=[dropout, 1 - dropout])
                else:
                    D = local_state.choice([0, 1],
                                           size=(n, n),
                                           replace=True,
                                           p=[dropout, 1 - dropout])
                W_dropped = np.multiply(W, D)

                # Check for connectivity
                i_lower = np.tril_indices(n, -1)
                W_dropped[i_lower] = W_dropped.T[i_lower]
                # Make sure diagonal is empty
                np.fill_diagonal(W_dropped, 0)
                W_sym = nx.from_numpy_matrix(W_dropped)
                if nx.is_connected(W_sym):
                    connected = True
                    W = np.copy(W_dropped)
    elif graph_type == "cycle":
        if seed == -1:
            weights = np.random.uniform(low=min_weight,
                                        high=max_weight,
                                        size=n)
        else:
            local_state = np.random.RandomState(seed)
            weights = local_state.uniform(low=min_weight,
                                          high=max_weight,
                                          size=(n, n))
        W = np.zeros((n, n))
        i = 0
        j = 1
        for k in range(n - 1):
            W[i, j] = weights[k]
            i += 1
            j += 1
        # Add last weight
        W[0, n - 1] = weights[n - 1]
    elif graph_type == "regular_log(n)":
        degseq = np.repeat(np.floor(np.log(n)), n)
        adjMatr = AdjacencyMatrixDegree(degseq)
        if seed == -1:
            W = np.random.uniform(low=min_weight, high=max_weight, size=n)
        else:
            local_state = np.random.RandomState(seed)
            W = local_state.uniform(low=min_weight, high=max_weight, size=n)
        W = np.multiply(W, adjMatr)
    elif graph_type == "intercalate":
        if seed == -1:
            weights = np.random.uniform(low=min_weight,
                                        high=max_weight,
                                        size=n)
        else:
            local_state = np.random.RandomState(seed)
            weights = local_state.uniform(low=min_weight,
                                          high=max_weight,
                                          size=(n, n))
        adjMatr = np.zeros((n, n))
        # Fill diagonals above the main diagonal
        for k in range(intercalation):
            for i in range(n - k - 1):
                adjMatr[i, i + k + 1] = 1
        # Fill upper right corner
        for j in range(intercalation):
            for i in range(j + 1):
                adjMatr[j - i, n - i - 1] = 1
        W = np.multiply(weights, adjMatr)
    elif graph_type == "modulo":
        if seed == -1:
            weights = np.random.uniform(low=min_weight,
                                        high=max_weight,
                                        size=n)
        else:
            local_state = np.random.RandomState(seed)
            weights = local_state.uniform(low=min_weight,
                                          high=max_weight,
                                          size=(n, n))
        adjMatr = np.zeros((n, n))
        # Create circle
        for i in range(n - 1):
            adjMatr[i, i + 1] = 1
        # Fill upper right corner
        adjMatr[0, n - 1] = 1
        # Add other connections
        # Fill diagonals above the main diagonal
        for i in range(n - modulo):
            adjMatr[i, i + modulo] = 1
        # Fill upper right corner
        for i in range(modulo):
            adjMatr[modulo - 1 - i, n - i - 1] = 1
        W = np.multiply(weights, adjMatr)
    # Stochastic Block Model
    elif graph_type == "assortative":
        communities = np.floor(n / 3)
        sizes = 3 * [int(communities)]
        # Check if we have all nodes:
        if 3 * communities < n:
            sizes[0] += 1
        if 3 * communities < n - 1:
            sizes[1] += 1
        probs = [[0.4, 0.02, 0.02], [0.02, 0.4, 0.02], [0.02, 0.02, 0.4]]
        g = nx.stochastic_block_model(sizes, probs, seed=0)
        W = nx.to_numpy_matrix(g)
    elif graph_type == "disassortative":
        communities = np.floor(n / 3)
        sizes = 3 * [int(communities)]
        # Check if we have all nodes:
        if 3 * communities < n:
            sizes[0] += 1
        if 3 * communities < n - 1:
            sizes[1] += 1
        probs = [[0.02, 0.08, 0.08], [0.08, 0.02, 0.08], [0.08, 0.08, 0.2]]
        g = nx.stochastic_block_model(sizes, probs, seed=0)
        W = nx.to_numpy_matrix(g)
    elif graph_type == "ordered":
        communities = np.floor(n / 3)
        sizes = 3 * [int(communities)]
        # Check if we have all nodes:
        if 3 * communities < n:
            sizes[0] += 1
        if 3 * communities < n - 1:
            sizes[1] += 1
        probs = [[0, 0.15, 0.15], [0.15, 0, 0.15], [0.15, 0.15, 0]]
        g = nx.stochastic_block_model(sizes, probs, seed=0)
        W = nx.to_numpy_matrix(g)
    elif graph_type == "core-periphery":
        communities = np.floor(n / 3)
        sizes = 3 * [int(communities)]
        # Check if we have all nodes:
        if 3 * communities < n:
            sizes[0] += 1
        if 3 * communities < n - 1:
            sizes[1] += 1
        probs = [[0.4, 0.15, 0.03], [0.15, 0.15, 0.03], [0.03, 0.03, 0.03]]
        g = nx.stochastic_block_model(sizes, probs, seed=0)
        W = nx.to_numpy_matrix(g)

    # Copy the upper triangle to the lower triangle (only the upper entries are used)
    i_lower = np.tril_indices(n, -1)
    W[i_lower] = W.T[i_lower]
    # Make sure diagonal is empty
    np.fill_diagonal(W, 0)

    return W
Example #38
# adjust the test-set proportion to 0.8
from sklearn.metrics import precision_score

print(precision_score(y_test, y_predict, average='micro'))  #0.93115438108484

from sklearn.metrics import confusion_matrix

print(confusion_matrix(y_test, y_predict))
# [[147   0   1   0   0   1   0   0   0   0]
#  [  0 123   1   2   0   0   0   3   4  10]
#  [  0   0 134   1   0   0   0   0   1   0]
#  [  0   0   0 138   0   5   0   1   5   0]
#  [  2   5   0   0 139   0   0   3   0   1]
#  [  1   3   1   0   0 146   0   0   1   0]
#  [  0   2   0   0   0   1 131   0   2   0]
#  [  0   0   0   1   0   0   0 132   1   2]
#  [  1   9   2   3   2   4   0   0 115   4]
#  [  0   1   0   5   0   3   0   2   2 134]]

cfm = confusion_matrix(y_test, y_predict)
plt.matshow(cfm, cmap=plt.cm.gray)
plt.show()

# We can see that many '1's were predicted as '9' and many '8's were predicted as '1'
row_sums = np.sum(cfm, axis=1, keepdims=True)  # column vector so the division is row-wise
err_matrix = cfm / row_sums
np.fill_diagonal(err_matrix, 0)

plt.matshow(err_matrix, cmap=plt.cm.gray)
plt.show()
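For the row normalization to divide each row by its own sum, row_sums has to broadcast as a column vector (hence keepdims=True above). A small check with made-up counts:

import numpy as np

cfm = np.array([[8., 2.],
                [1., 9.]])                       # illustrative 2-class confusion matrix
row_sums = cfm.sum(axis=1, keepdims=True)        # shape (2, 1): broadcasts row-wise
err = cfm / row_sums
np.fill_diagonal(err, 0)
print(err)                                       # off-diagonal error rates: 0.2 and 0.1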
Пример #39
0
    def run(self):
        self.reg_cov = 1e-6 * np.identity(len(self.X[0]))
        x, y = np.meshgrid(np.sort(self.X[:, 0]), np.sort(self.X[:, 1]))
        self.XY = np.array([x.flatten(), y.flatten()]).T
        """ 1. Set the initial mu, covariance and pi values"""
        self.mu = np.random.randint(
            min(self.X[:, 0]),
            max(self.X[:, 0]),
            size=(self.number_of_sources, len(self.X[0]))
        )  # This is a nxm matrix since we assume n sources (n Gaussians) where each has m dimensions
        self.cov = np.zeros(
            (self.number_of_sources, len(self.X[0]), len(self.X[0]))
        )  # We need an nxmxm covariance matrix for each source since we have m features --> We create symmetric covariance matrices with a constant value on the diagonal
        for dim in range(len(self.cov)):
            np.fill_diagonal(self.cov[dim], 5)

        self.pi = np.ones(
            self.number_of_sources) / self.number_of_sources  # Are "Fractions"
        log_likelihoods = [
        ]  # In this list we store the log likelihoods per iteration and plot them at the end to check
        # whether we have converged
        """Plot the initial state"""
        fig = plt.figure(figsize=(10, 10))
        ax0 = fig.add_subplot(111)
        ax0.scatter(self.X[:, 0], self.X[:, 1])
        ax0.set_title('Initial state')
        for m, c in zip(self.mu, self.cov):
            c += self.reg_cov
            multi_normal = multivariate_normal(mean=m, cov=c)
            ax0.contour(np.sort(self.X[:, 0]),
                        np.sort(self.X[:, 1]),
                        multi_normal.pdf(self.XY).reshape(
                            len(self.X), len(self.X)),
                        colors='black',
                        alpha=0.3)
            ax0.scatter(m[0], m[1], c='grey', zorder=10, s=100)

        for i in range(self.iterations):
            """E Step"""
            r_ic = np.zeros((len(self.X), len(self.cov)))

            for m, co, p, r in zip(self.mu, self.cov, self.pi,
                                   range(len(r_ic[0]))):
                co += self.reg_cov
                mn = multivariate_normal(mean=m, cov=co)
                r_ic[:, r] = p * mn.pdf(self.X) / np.sum([
                    pi_c * multivariate_normal(mean=mu_c, cov=cov_c).pdf(self.X)
                    for pi_c, mu_c, cov_c in zip(self.pi, self.mu, self.cov +
                                                 self.reg_cov)
                ],
                                                         axis=0)
            """
            The above calculation of r_ic is not that obvious why I want to quickly derive what we have done above.
            First of all the nominator:
            We calculate for each source c which is defined by m,co and p for every instance x_i, the multivariate_normal.pdf() value.
            For each loop this gives us a 100x1 matrix (This value divided by the denominator is then assigned to r_ic[:,r] which is in 
            the end a 100x3 matrix).
            Second the denominator:
            What we do here is, we calculate the multivariate_normal.pdf() for every instance x_i for every source c which is defined by
            pi_c, mu_c, and cov_c and write this into a list. This gives us a 3x100 matrix where we have 100 entrances per source c.
            Now the formula wants us to add up the pdf() values given by the 3 sources for each x_i. Hence we sum up this list over axis=0.
            This gives us then a list with 100 entries.
            What we have now is FOR EACH LOOP a list with 100 entries in the nominator and a list with 100 entries in the denominator
            where each element is the pdf per class c for each instance x_i (nominator) respectively the summed pdf's of classes c for each 
            instance x_i. Consequently we can now divide the nominator by the denominator and have as result a list with 100 elements which we
            can then assign to r_ic[:,r] --> One row r per source c. In the end after we have done this for all three sources (three loops)
            and run from r==0 to r==2 we get a matrix with dimensionallity 100x3 which is exactly what we want.
            If we check the entries of r_ic we see that there mostly one element which is much larger than the other two. This is because
            every instance x_i is much closer to one of the three gaussians (that is, much more likely to come from this gaussian) than
            it is to the other two. That is practically speaing, r_ic gives us the fraction of the probability that x_i belongs to class
            c over the probability that x_i belonges to any of the classes c (Probability that x_i occurs given the 3 Gaussians).
            """
            """M Step"""

            # Calculate the new mean vector and new covariance matrices, based on the probable membership of the single x_i to classes c --> r_ic
            self.mu = []
            self.cov = []
            self.pi = []
            log_likelihood = []

            for c in range(len(r_ic[0])):
                m_c = np.sum(r_ic[:, c], axis=0)
                mu_c = (1 / m_c) * np.sum(
                    self.X * r_ic[:, c].reshape(len(self.X), 1), axis=0)
                self.mu.append(mu_c)

                # Calculate the covariance matrix per source based on the new mean
                self.cov.append(((1 / m_c) * np.dot(
                    (np.array(r_ic[:, c]).reshape(len(self.X), 1) *
                     (self.X - mu_c)).T, (self.X - mu_c))) + self.reg_cov)
                # Calculate pi_new which is the "fraction of points" respectively the fraction of the probability assigned to each source
                self.pi.append(
                    m_c / np.sum(r_ic)
                )  # Here np.sum(r_ic) gives the number of instances. This is logical since we know
                # that the entries of each row of r_ic add up to 1. Since we add up all elements, we first sum
                # the columns per row (which gives 1) and then all rows, which gives the number of instances (rows)
                # in X --> since pi_new contains the fractions of datapoints assigned to the sources c,
                # the elements in pi_new must add up to 1
            """Log likelihood"""
            log_likelihoods.append(
                np.log(
                    np.sum([
                        k * multivariate_normal(self.mu[i], self.cov[j]).pdf(self.X)
                        for k, i, j in zip(self.pi, range(len(self.mu)),
                                           range(len(self.cov)))
                    ])))

            print(log_likelihoods)
            """
            This process of E step followed by a M step is now iterated a number of n times. In the second step for instance,
            we use the calculated pi_new, mu_new and cov_new to calculate the new r_ic which are then used in the second M step
            to calculat the mu_new2 and cov_new2 and so on....
            """

        fig2 = plt.figure(figsize=(10, 10))
        ax1 = fig2.add_subplot(111)
        ax1.set_title('Log-Likelihood')
        ax1.plot(range(0, self.iterations, 1), log_likelihoods)
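The E-step above is just Bayes' rule per data point: r_ic equals pi_c * N(x_i | mu_c, cov_c) divided by the same quantity summed over all sources. A hedged, self-contained sketch with two made-up points and two sources:

import numpy as np
from scipy.stats import multivariate_normal

X = np.array([[0.0, 0.0], [5.0, 5.0]])                      # two illustrative 2-D points
pi = np.array([0.5, 0.5])
mu = [np.zeros(2), 5.0 * np.ones(2)]
cov = [np.eye(2), np.eye(2)]

weighted_pdfs = np.stack(
    [p * multivariate_normal(mean=m, cov=c).pdf(X) for p, m, c in zip(pi, mu, cov)],
    axis=1)                                                  # shape (n_points, n_sources)
r_ic = weighted_pdfs / weighted_pdfs.sum(axis=1, keepdims=True)
print(r_ic.sum(axis=1))                                      # each row sums to 1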
Пример #40
0
    def _srm(self, data):
        """Expectation-Maximization algorithm for fitting the probabilistic SRM.

        Parameters
        ----------

        data : list of 2D arrays, element i has shape=[voxels_i, samples]
            Each element in the list contains the fMRI data of one subject.


        Returns
        -------

        sigma_s : array, shape=[features, features]
            The covariance :math:`\\Sigma_s` of the shared response Normal
            distribution.

        w : list of array, element i has shape=[voxels_i, features]
            The orthogonal transforms (mappings) :math:`W_i` for each subject.

        mu : list of array, element i has shape=[voxels_i]
            The voxel means :math:`\\mu_i` over the samples for each subject.

        rho2 : array, shape=[subjects]
            The estimated noise variance :math:`\\rho_i^2` for each subject

        s : array, shape=[features, samples]
            The shared response.
        """

        samples = min([d.shape[1] for d in data if d is not None], default=sys.maxsize)
        subjects = len(data)
        self.random_state_ = np.random.RandomState(self.rand_seed)
        random_states = [
            np.random.RandomState(
                self.random_state_.randint(2 ** 32 - 1, dtype=np.int64)
            )
            for i in range(len(data))
        ]

        # Initialization step: initialize the outputs with initial values,
        # voxels with the number of voxels in each subject, and trace_xtx with
        # the ||X_i||_F^2 of each subject.
        w, voxels = _init_w_transforms(data, self.features, random_states)
        x, mu, rho2, trace_xtx = self._init_structures(data, subjects)
        shared_response = np.zeros((self.features, samples))
        sigma_s = np.identity(self.features)

        # Main loop of the algorithm (runs for self.n_iter iterations)
        for iteration in range(self.n_iter):
            logger.info("Iteration %d" % (iteration + 1))

            # E-step:

            # Sum the inverted rho2 elements for computing W^T * Psi^-1 * W
            rho0 = (1 / rho2).sum()

            # Invert Sigma_s using Cholesky factorization
            (chol_sigma_s, lower_sigma_s) = scipy.linalg.cho_factor(
                sigma_s, check_finite=False
            )
            inv_sigma_s = scipy.linalg.cho_solve(
                (chol_sigma_s, lower_sigma_s),
                np.identity(self.features),
                check_finite=False,
            )

            # Invert (Sigma_s + rho_0 * I) using Cholesky factorization
            sigma_s_rhos = inv_sigma_s + np.identity(self.features) * rho0
            chol_sigma_s_rhos, lower_sigma_s_rhos = scipy.linalg.cho_factor(
                sigma_s_rhos, check_finite=False
            )
            inv_sigma_s_rhos = scipy.linalg.cho_solve(
                (chol_sigma_s_rhos, lower_sigma_s_rhos),
                np.identity(self.features),
                check_finite=False,
            )

            # Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces
            # of X_i^T * rho_i^-2 * X_i
            wt_invpsi_x = np.zeros((self.features, samples))
            trace_xt_invsigma2_x = 0.0
            for subject in range(subjects):
                if data[subject] is not None:
                    wt_invpsi_x += (w[subject].T.dot(x[subject])) / rho2[subject]
                    trace_xt_invsigma2_x += trace_xtx[subject] / rho2[subject]

            log_det_psi = np.sum(np.log(rho2) * voxels)

            # Update the shared response
            shared_response = sigma_s.dot(
                np.identity(self.features) - rho0 * inv_sigma_s_rhos
            ).dot(wt_invpsi_x)

            # M-step

            # Update Sigma_s and compute its trace
            sigma_s = (
                inv_sigma_s_rhos + shared_response.dot(shared_response.T) / samples
            )
            trace_sigma_s = samples * np.trace(sigma_s)

            # Update each subject's mapping transform W_i and error variance
            # rho_i^2
            for subject in range(subjects):
                if x[subject] is not None:
                    a_subject = x[subject].dot(shared_response.T)
                    perturbation = np.zeros(a_subject.shape)
                    np.fill_diagonal(perturbation, 0.001)
                    u_subject, s_subject, v_subject = np.linalg.svd(
                        a_subject + perturbation, full_matrices=False
                    )
                    w[subject] = u_subject.dot(v_subject)
                    rho2[subject] = trace_xtx[subject]
                    rho2[subject] += -2 * np.sum(w[subject] * a_subject)
                    rho2[subject] += trace_sigma_s
                    rho2[subject] /= samples * voxels[subject]
                else:
                    rho2[subject] = 0
            if logger.isEnabledFor(logging.INFO):
                # Calculate and log the current log-likelihood for checking
                # convergence
                loglike = self._likelihood(
                    chol_sigma_s_rhos,
                    log_det_psi,
                    chol_sigma_s,
                    trace_xt_invsigma2_x,
                    inv_sigma_s_rhos,
                    wt_invpsi_x,
                    samples,
                )
                logger.info("Objective function %f" % loglike)

        return sigma_s, w, mu, rho2, shared_response
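Inverting Sigma_s with cho_factor/cho_solve, as in the E-step above, is just solving Sigma_s Z = I for a symmetric positive-definite matrix. A minimal hedged sketch:

import numpy as np
import scipy.linalg

k = 4
A = np.random.randn(k, k)
sigma_s = A @ A.T + k * np.eye(k)                     # symmetric positive definite
chol, lower = scipy.linalg.cho_factor(sigma_s, check_finite=False)
inv_sigma_s = scipy.linalg.cho_solve((chol, lower), np.identity(k), check_finite=False)
print(np.allclose(sigma_s @ inv_sigma_s, np.eye(k)))  # True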
Пример #41
0
# Nodal Admittance Matrix
Ybus = np.zeros((len(buses.keys()), len(buses.keys())), dtype=complex)

# Shunt Elements Vector
Bshunt = np.zeros(len(buses), dtype=complex)

for key in lines.keys():
    Ybus[lines[key].origin - 1][lines[key].destiny -
                                1] = -1 / (lines[key].R + 1j * lines[key].X)
    Bshunt[lines[key].origin - 1] += 1j * lines[key].B / 2
    Bshunt[lines[key].destiny - 1] += 1j * lines[key].B / 2

Ybus += Ybus.T

np.fill_diagonal(Ybus, Bshunt - np.sum(Ybus, axis=1))

# Create Jacobian Matrix
H = np.zeros((len(measures.keys()), len(buses.keys())))

# Create measurement vector
z = np.zeros((len(measures.keys()), 1))

# Create weight matrix
W = np.zeros((len(measures.keys()), len(measures.keys())))

# Fill Jacobian matrix, measurement vector and weight matrix
aux = 0
for key in measures.keys():

    # Fill Jacobian matrix
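Filling only the upper triangle, adding the transpose, and then setting the diagonal to Bshunt minus the row sums reproduces the usual Y-bus rule (Y_ii equals the sum of incident series admittances plus the bus shunt). A hedged two-bus sketch with illustrative per-unit line data:

import numpy as np

R, X, B = 0.01, 0.1, 0.02              # illustrative series impedance and total line charging
y = 1 / (R + 1j * X)                   # series admittance of the single line

Ybus = np.zeros((2, 2), dtype=complex)
Ybus[0, 1] = -y                        # upper triangle only, as in the loop above
Bshunt = np.array([1j * B / 2, 1j * B / 2])

Ybus += Ybus.T
np.fill_diagonal(Ybus, Bshunt - np.sum(Ybus, axis=1))
print(np.allclose(Ybus[0, 0], y + 1j * B / 2))   # True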
Пример #42
0
import scipy.sparse.linalg as splg

N = 32

h = 1 / N
"""Create A"""
Ih = np.identity(N + 1)
Ih[0, 0] = 0
Ih[N, N] = 0

vdiag = 4 * np.ones(N - 1)
voffs = -1 * np.ones(N - 2)
zeros = np.zeros((1, N - 1))

Th = np.diag(vdiag)
np.fill_diagonal(Th[:, 1:], voffs)
np.fill_diagonal(Th[1:], voffs)

Th = np.block([[h**2, zeros, 0], [zeros.T, Th, zeros.T], [0, zeros, h**2]])
#print(Th)

zN = np.zeros((N + 1, (N + 1)**2 - (N + 1)))
zShort = np.zeros((N + 1, N + 1))
#print(np.shape(zN))
I = h**2 * np.identity(N + 1)
block1 = np.block([[I, zN]])
block2 = np.block([[zShort, Th, -Ih, zN[:, 2 * (N + 1):]]])

A = np.block([[block1], [block2]])

for i in range(3, N):
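A hedged sanity check (illustrative small N) that filling the diagonals of the shifted views Th[:, 1:] and Th[1:], as above, really produces the tridiagonal block:

import numpy as np

N = 5
vdiag = 4 * np.ones(N - 1)
voffs = -1 * np.ones(N - 2)

Th = np.diag(vdiag)
np.fill_diagonal(Th[:, 1:], voffs)    # superdiagonal
np.fill_diagonal(Th[1:], voffs)       # subdiagonal

Th_ref = np.diag(vdiag) + np.diag(voffs, 1) + np.diag(voffs, -1)
print(np.allclose(Th, Th_ref))        # True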
Пример #43
0
def calc_grid_probs(grid, grid_n_x, grid_n_y, Suscept, kernel_function, \
    d_perimeter_x, d_perimeter_y):
    """
    Calculate the probability of infection from one grid square to the next.
    (quicker than version above)
    
    Notes
    -----
    Grid and Suscept MUST be ordered according to grid number!  
    
    Could also use pairwise chebyshev distances (scipy.spatial.distance.cdist)
    instead of using ogrid etc (below) if the x/y lengths of the grid cells
    are the same.  
    
    
    Parameters
    ----------
        grid: np.array
    
        grid_n_x: np.array
    
        grid_n_y: np.array
    
        Suscept: np.array
    
        kernel_function: function
    
        d_perimeter_x : float
        
        d_perimeter_y : float
        
        
    Returns
    -------
        
        MaxRate
        
        Num
        
        first_
        
        last_
        
        max_sus
        
        Dist2all
        
        KDist2all
    
    
    Example
    -------
    # Calculate grid-level probabilities of infection for the UK data
    
    import epi_speed_test.py.fmd as fmd
    kernel = fmd.UKKernel
    out = calc_grid_probs(grid, 100, 100, np.random.rand(len(grid)), kernel, \
        np.ptp(demo.easting.values), np.ptp(demo.northing.values))
    
    
    Updates
    -------
    
    # 6th Oct 2014 Changed use of grid.max()+1 to grid_n_x*grid_n_y, WP
    # Feb 2017 Changed to use np.ogrid etc, WP
    """
    
    NG = grid_n_x*grid_n_y
    
    # Find the farm indices per grid square
    fpg = [np.where(grid == i)[0] for i in range(NG)]
    # Could perhaps be made quicker by only looking at the grids with farms in them.
    
    # Maximum susceptibility of a feedlot per grid square (0 for empty grid)
    max_sus = np.asarray([np.max(Suscept[x]) if len(x) > 0 else 0 for x in fpg])
    
    # Find the number of farms per grid square
    Num = np.array([len(i) for i in fpg])
    
    # First farm in grid, -1 if the list is empty
    first_ = np.asarray([x[0] if len(x) > 0 else -1 for x in fpg])
    
    # Last farm in grid, -2 if the list is empty
    last_ = np.asarray([x[-1] if len(x) > 0 else -2 for x in fpg])
    
    NCOL = float(grid_n_x); NROW = float(grid_n_y)
    HEIGHT = d_perimeter_y/NROW; WIDTH = d_perimeter_x/NCOL
    
    # Calculate the minimum sq-distance between each grid square
    # X and Y position of each grid square
    Y, X = np.ogrid[0:NROW*NCOL, 0:NROW*NCOL]
    HDIST = np.abs(Y%NCOL - X%NCOL) - 1
    VDIST = np.abs(Y/NCOL - X/NCOL) - 1
    HDIST[HDIST < 0] = 0; VDIST[VDIST < 0] = 0
    Dist2all = np.add((HDIST*WIDTH)**2, (VDIST*HEIGHT)**2)
    
    KDist2all = kernel_function(Dist2all) # very slow
    
    # Could perhaps use Numpy broadcasting for this ... 
    MaxRate = np.multiply(KDist2all, np.tile(max_sus, (grid_n_x*grid_n_y, 1)))
    
    # All grids with no farms should have infinite rate of input infection
    # All grids with no farms should have infinite rate of susceptibility
    MaxRate[Num == 0] = np.inf
    MaxRate[:, Num == 0] = np.inf
    
    # Set rate from one grid to itself to infinity.  
    np.fill_diagonal(MaxRate, np.inf)
    
    return(MaxRate, Num, first_, last_, max_sus, Dist2all, KDist2all)
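Dist2all above is the minimum squared distance between grid squares: the number of whole cells separating them horizontally and vertically, scaled by the cell width and height. A hedged sketch on a 3x3 grid of 10x10 cells (integer floor division is used here for the row index):

import numpy as np

n_x = n_y = 3
WIDTH = HEIGHT = 10.0

Y, X = np.ogrid[0:n_x * n_y, 0:n_x * n_y]
HDIST = np.abs(Y % n_x - X % n_x) - 1          # whole cells between squares, horizontally
VDIST = np.abs(Y // n_x - X // n_x) - 1        # whole cells between squares, vertically
HDIST[HDIST < 0] = 0
VDIST[VDIST < 0] = 0
Dist2all = (HDIST * WIDTH) ** 2 + (VDIST * HEIGHT) ** 2

print(Dist2all[0, 2])   # squares 0 and 2 share a row with one cell between them: 100.0
print(Dist2all[0, 1])   # adjacent squares touch, so the minimum distance is 0.0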
Пример #44
0
cm = confusion_matrix(y_train, y_train_pred)
cm

plt.matshow(cm, cmap=plt.cm.gray)
plt.show()

# %%
"""
Normalize the confusion matrix
"""

row_sums = cm.sum(axis=1, keepdims=True)
norm_cm = cm / row_sums

# Remove the diagonal to keep only the errors
np.fill_diagonal(norm_cm, 0)
plt.matshow(norm_cm, cmap=plt.cm.gray)
plt.show()

# %%
"""
Multilabel classification

KNeighborsClassifier

f1_score -- assumes all labels are equally important and hence takes a simple average
the other option, average="weighted", gives each label a weight equal to its support (i.e., the number of instances with that
target label)
"""
from sklearn.neighbors import KNeighborsClassifier
y_train_large = (y_train >= 7)
Пример #45
0
x = np.array([[1, 3], [2, 4]])
y = np.array([[1, 0], [0, 1]])
z = np.array([[x, x], [y, y]])

#2
a @ a.T
np.add(b, a.T)
A @ A.T
A**3
A @ B

#3
A = np.random.randn(10, 5)
B = np.random.randn(5, 10)
C = A @ B
np.fill_diagonal(C, 1)

#4
D = np.tile(A, (2, 3))
E = np.tile(B, (3, 2))
F = D @ E
F[F <= 0.5] = 0

#5
np.reshape(D, (3, 10, 10))
np.reshape(E, (3, 10, 10))

#6
x = np.arange(100.0)
np.reshape(x, (10, 10))
np.reshape(x, (20, 5))
Пример #46
0
#remove the small streamlines
streamlines_cut = lambda: (sl for sl in streamlines if len(sl) > 1)
streamlines = Streamlines(streamlines_cut())

print runno + ' streamlines preparation finished'

# In[5]:

#build the connectivity matrix
M = utils.connectivity_matrix(streamlines,
                              labels_,
                              affine=affine_labels,
                              return_mapping=False,
                              mapping_as_streamlines=False)
# fill diagonal with 0
np.fill_diagonal(M, 0)

print runno + ' connectivity matrix building finished'

# In[20]:

M = M[1:, 1:]

# In[22]:

plt.imshow(M)

# In[11]:

np.diag(M)
Пример #47
0
    def sampler(self, w_limit, tau_limit, file):

        start = time.time()

        # ------------------- initialize MCMC
        testsize = self.testdata.shape[0]
        trainsize = self.traindata.shape[0]
        samples = self.samples

        self.sgd_depth = 1
        learn_rate = 0.5

        x_test = np.linspace(0, 1, num=testsize)
        x_train = np.linspace(0, 1, num=trainsize)

        Netlist = [None] * 10

        netsize = np.zeros(self.subtasks, dtype=np.int)

        for n in xrange(0, self.subtasks):
            module = self.mtaskNet[n]
            trdata = self.taskdata(
                self.traindata, module[0],
                module[2])  # make the partitions for task data
            testdata = self.taskdata(self.testdata, module[0], module[2])
            Netlist[n] = Network(self.mtaskNet[n], trdata, testdata,
                                 learn_rate)
            # print("Size: "+str(n)+" " + str(trdata)+ " " + str(module[0]))
            # print trdata

        for n in xrange(0, self.subtasks):
            netw = Netlist[n].Top
            netsize[n] = self.net_size(netw)  # num of weights and bias
            # print(netsize[n])

        y_test = self.testdata[:, netw[0]:]
        y_train = self.traindata[:, netw[0]:]

        pos_w = np.ones(
            (samples, self.subtasks,
             netsize[self.subtasks -
                     1]))  # posterior of all weights and bias over all samples
        pos_tau = np.ones((samples, self.subtasks))

        fxtrain_samples = np.ones(
            (samples, self.subtasks, trainsize,
             netw[2]))  # fx of train data over all samples
        fxtest_samples = np.ones((samples, self.subtasks, testsize,
                                  netw[2]))  # fx of test data over all samples
        rmse_train = np.zeros((samples, self.subtasks))
        rmse_test = np.zeros((samples, self.subtasks))

        w = np.random.randn(self.subtasks, netsize[self.subtasks - 1])
        w_proposal = np.random.randn(self.subtasks, netsize[self.subtasks - 1])
        w_gd = np.random.randn(self.subtasks, netsize[self.subtasks - 1])
        w_prop_gd = np.random.randn(self.subtasks, netsize[self.subtasks - 1])

        # step_w = 0.05;  # defines how much variation you need in changes to w
        # step_eta = 0.2; # exp 0

        step_w = w_limit  # defines how much variation you need in changes to w
        step_eta = tau_limit  # exp 1
        # --------------------- Declare FNN and initialize

        pred_train = Netlist[0].evaluate_proposal(self.traindata,
                                                  w[0, :netsize[0]],
                                                  self.mtaskNet, 0)
        pred_test = Netlist[0].evaluate_proposal(self.testdata,
                                                 w[0, :netsize[0]],
                                                 self.mtaskNet, 0)
        rmsetrain = np.zeros(self.subtasks)
        rmsetest = np.zeros(self.subtasks)

        eta = np.log(np.var(pred_train - y_train))
        tau_pro = np.exp(eta)

        pred_train = np.zeros((self.subtasks, trainsize, Netlist[0].Top[2]))
        pred_test = np.zeros((self.subtasks, testsize, Netlist[0].Top[2]))

        sigma_squared = 25
        nu_1 = 0
        nu_2 = 0

        sigma_diagmat = np.zeros(
            (self.subtasks, netsize[self.subtasks - 1],
             netsize[self.subtasks -
                     1]))  # for Equation 9 in Ref [Chandra_ICONIP2017]
        for s in xrange(self.subtasks):
            np.fill_diagonal(sigma_diagmat[s], step_w)

        delta_likelihood = 0.5  # an arbitrary position

        likelihood = np.zeros(self.subtasks)
        likelihood_proposal = np.zeros(self.subtasks)
        likelihood_ignore = np.zeros(self.subtasks)

        prior_current = np.zeros(self.subtasks)

        prior_pro = np.zeros(self.subtasks)

        for n in xrange(0, self.subtasks):
            prior_current[n] = self.prior_likelihood(
                sigma_squared, nu_1, nu_2, w[n, :netsize[0]], tau_pro,
                Netlist[n].Top)  # takes care of the gradients

        mh_prob = np.zeros(self.subtasks)

        for s in xrange(0, self.subtasks - 1):
            [likelihood[s], fxtrain_samples[0, s, :], rmse_train[0, s]
             ] = self.likelihood_func(Netlist[s], self.traindata,
                                      w[s, :netsize[s]], tau_pro, s)
            [likelihood_ignore, fxtest_samples[0, s, :],
             rmse_test[0, s]] = self.likelihood_func(Netlist[s], self.testdata,
                                                     w[s, :netsize[s]],
                                                     tau_pro, s)
            w[s + 1, :netsize[s]] = w[s, :netsize[s]]

        s = self.subtasks - 1
        [likelihood[s], fxtrain_samples[0, s, :],
         rmse_train[0,
                    s]] = self.likelihood_func(Netlist[s], self.traindata,
                                               w[s, :netsize[s]], tau_pro, s)
        # print likelihood

        naccept = 0
        # print 'begin sampling using mcmc random walk'
        # plt.plot(x_train, y_train)
        # plt.plot(x_train, pred_train)
        # plt.title("Plot of Data vs Initial Fx")
        # plt.savefig('mcmcresults/begin.png')
        # plt.clf()

        # plt.plot(x_train, y_train)

        diff_prop = np.zeros(self.subtasks)
        diff = np.zeros(self.subtasks)
        prior_prop = np.zeros(self.subtasks)

        for i in range(1, samples - 1):
            # print i
            for s in xrange(self.subtasks):
                # print("B2:")
                # print(Netlist[s].B2.shape)
                # print(netsize[s], Netlist[s].Top)
                w_gd[s, :netsize[s]] = Netlist[s].langevin_gradient(
                    self.traindata, w[s, :netsize[s]].copy(), self.sgd_depth,
                    self.mtaskNet, s)  # Eq 8
                w_proposal[
                    s, :netsize[s]] = w_gd[s, :netsize[s]] + np.random.normal(
                        0, step_w, netsize[s])  # Eq 7
                w_prop_gd[s, :netsize[s]] = Netlist[s].langevin_gradient(
                    self.traindata, w_proposal[s].copy(), self.sgd_depth,
                    self.mtaskNet, s)

            # print(multivariate_normal.pdf(w, w_prop_gd, sigma_diagmat),multivariate_normal.pdf(w_proposal, w_gd, sigma_diagmat))

            for s in xrange(self.subtasks):
                diff_prop[s] = np.log(
                    multivariate_normal.pdf(
                        w[s, :netsize[s]], w_prop_gd[s, :netsize[s]],
                        sigma_diagmat[s, :netsize[s], :netsize[s]])) - np.log(
                    multivariate_normal.pdf(
                        w_proposal[s, :netsize[s]], w_gd[s, :netsize[s]],
                        sigma_diagmat[s, :netsize[s], :netsize[s]]))

            eta_pro = eta + np.random.normal(0, step_eta, 1)
            tau_pro = math.exp(eta_pro)

            for s in xrange(self.subtasks - 1):
                [likelihood_proposal[s], pred_train[s, :], rmsetrain[s]
                 ] = self.likelihood_func(Netlist[s], self.traindata,
                                          w_proposal[s, :netsize[s]], tau_pro,
                                          s)

                [_, pred_test[s, :], rmsetest[s]
                 ] = self.likelihood_func(Netlist[s], self.testdata,
                                          w_proposal[s, :netsize[s]], tau_pro,
                                          s)
                w_proposal[s + 1, :netsize[s]] = w_proposal[s, :netsize[s]]

            s = self.subtasks - 1
            [likelihood_proposal[s], pred_train[s, :],
             rmsetrain[s]] = self.likelihood_func(Netlist[s], self.traindata,
                                                  w_proposal[s, :netsize[s]],
                                                  tau_pro, s)
            [_, pred_test[s, :],
             rmsetest[s]] = self.likelihood_func(Netlist[s], self.testdata,
                                                 w_proposal[s, :netsize[s]],
                                                 tau_pro, s)

            # likelihood_ignore refers to a parameter that will not be used in the algorithm

            for s in xrange(self.subtasks):
                prior_prop[s] = self.prior_likelihood(
                    sigma_squared, nu_1, nu_2, w_proposal[s, :netsize[s]],
                    tau_pro, Netlist[s].Top)  # takes care of the gradients

            diff_prior = prior_prop - prior_current
            diff_likelihood = likelihood_proposal - likelihood

            for s in xrange(self.subtasks):
                diff[s] = min(
                    700, diff_prior[s] + diff_likelihood[s] + diff_prop[s])
                mh_prob[s] = min(1, math.exp(diff[s]))
                # print()
                # print(diff, i)

                # print(mh_prob)

                u = random.uniform(0, 1)

                if u < mh_prob[s]:
                    # Update position
                    # print    i, ' is accepted sample'
                    naccept += 1
                    likelihood[s] = likelihood_proposal[s]
                    prior_current[s] = prior_prop[s]
                    w[s, :netsize[s]] = w_proposal[s, :netsize[s]]
                    eta = eta_pro

                    elapsed_time = ":".join(
                        covert_time(int(time.time() - start)))
                    # sys.stdout.write('\r' + file + ' : ' + str(round(float(i) / (samples - 1) * 100, 2)) + '% complete....'+" time elapsed: " + elapsed_time)
                    # print  likelihood, prior_current, diff_prop, rmsetrain, rmsetest, w, 'accepted'
                    # print w_proposal, 'w_proposal'
                    # print w_gd, 'w_gd'

                    # print w_prop_gd, 'w_prop_gd'

                    pos_w[i + 1, s, :netsize[s]] = w_proposal[s, :netsize[s]]
                    pos_tau[i + 1] = tau_pro
                    fxtrain_samples[i + 1, s, :] = pred_train[s, :]
                    fxtest_samples[i + 1, s, :] = pred_test[s, :]
                    rmse_train[i + 1, s] = rmsetrain[s]
                    rmse_test[i + 1, s] = rmsetest[s]

                    print i, 'accepted'
                    # plt.plot(x_train, pred_train)

                else:
                    pos_w[i + 1, s, :netsize[s]] = pos_w[i, s, :netsize[s]]
                    pos_tau[i + 1, s] = pos_tau[i, s]
                    fxtrain_samples[i + 1, s, :] = fxtrain_samples[i, s, :]
                    fxtest_samples[i + 1, s, :] = fxtest_samples[i, s, :]
                    rmse_train[i + 1, s] = rmse_train[i, s]
                    rmse_test[i + 1, s] = rmse_test[i, s]

                    print i, 'rejected and retained'
        sys.stdout.write('\r' + file + ' : 100% ..... Total Time: ' +
                         ":".join(covert_time(int(time.time() - start))))
        # print naccept, ' num accepted'
        # print naccept / (samples * 1.0), '% was accepted'
        accept_ratio = naccept / (samples * self.subtasks * 1.0) * 100

        # plt.title("Plot of Accepted Proposals")
        # plt.savefig('mcmcresults/proposals.png')
        # plt.savefig('mcmcresults/proposals.svg', format='svg', dpi=600)
        # plt.clf()

        return (pos_w, pos_tau, fxtrain_samples, fxtest_samples, x_train,
                x_test, rmse_train, rmse_test, accept_ratio)
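Because the Langevin-gradient proposal above is asymmetric, the acceptance probability needs the log ratio of the reverse and forward proposal densities (diff_prop). A hedged sketch of that correction term alone, with illustrative values and the likelihood and prior terms omitted:

import math
import numpy as np
from scipy.stats import multivariate_normal

step_w = 0.05
sigma = step_w * np.eye(2)

w_current = np.array([0.10, -0.20])
w_gd = np.array([0.12, -0.18])                      # gradient step from w_current (illustrative)
w_proposal = w_gd + np.random.normal(0, step_w, 2)  # random-walk step around the gradient point
w_prop_gd = w_proposal + 0.01                       # gradient step from w_proposal (illustrative)

diff_prop = (multivariate_normal.logpdf(w_current, mean=w_prop_gd, cov=sigma)
             - multivariate_normal.logpdf(w_proposal, mean=w_gd, cov=sigma))
mh_prob = min(1.0, math.exp(min(700, diff_prop)))   # capped as in the sampler above
print(mh_prob)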
Пример #48
0
    def compute_cim_coefficients(self) -> None:
        """Compute the coefficients of the matrix _cim by using the following equality q_xx' = M[x, x'] / T[x].
        The class member ``_cim`` will contain the computed cim
        """
        np.fill_diagonal(self._cim, self._cim.diagonal() * -1)
        self._cim = ((self._cim.T + 1) / (self._state_residence_times + 1)).T
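A hedged numeric sketch of that equality (counts and residence times made up here): negate the diagonal, then divide every row x by T[x] + 1.

import numpy as np

M = np.array([[2., 3.],
              [4., 5.]])                  # illustrative transition counts per state pair
T = np.array([10., 20.])                  # illustrative state residence times

np.fill_diagonal(M, M.diagonal() * -1)    # q_xx is negative by convention
cim = ((M.T + 1) / (T + 1)).T             # row x of cim is (M[x, :] + 1) / (T[x] + 1)
print(cim)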
Пример #49
0
mmc = conf_in.mean()
ece, mce = get_calib(py_in, targets)
save_res_ood(tab_ood['MNIST - MNIST'], mmc)
save_res_cal(tab_cal['MAP'], ece, mce)
print(
    f'[In, MAP] Time: NA/{time_pred:.1f}s; Accuracy: {acc_in:.3f}; ECE: {ece:.3f}; MCE: {mce:.3f}; MMC: {mmc:.3f}'
)

# ----------------------------------------------------------------------
# Make a binary classification dataset
if args.generate_confmat:
    from sklearn.metrics import confusion_matrix

    C = confusion_matrix(targets, py_in.argmax(1))
    print(C)
    np.fill_diagonal(C, -1)
    max_idx = np.unravel_index(C.argmax(), shape=C.shape)
    max_val = C.max()
    print(f'Most confused index: {max_idx}, value: {max_val}')

    sys.exit()
# ----------------------------------------------------------------------

# Out-distribution - EMNIST
py_out = predict(test_loader_EMNIST, model).cpu().numpy()
conf_emnist = get_confidence(py_out)
mmc = conf_emnist.mean()
auroc = get_auroc(py_in, py_out)
save_res_ood(tab_ood['MNIST - EMNIST'], mmc, auroc)
print(f'[Out-EMNIST, MAP] MMC: {mmc:.3f}; AUROC: {auroc:.3f}')
Пример #50
0
data['gabamat'] = np.zeros((data['n_n'], data['n_n']))

n_n = data['n_n']
l_n = int(0.25 * n_n)
p_n = int(0.75 * n_n)

ach_mat = np.zeros((n_n, n_n))
ach_mat[p_n:, :p_n] = np.random.choice([0., 1.],
                                       size=(l_n, p_n),
                                       p=(1 - data['PNLN'],
                                          data['PNLN']))  # PN->LN
ach_mat[:p_n, :p_n] = np.random.choice([0., 1.],
                                       size=(p_n, p_n),
                                       p=(1 - data['PNPN'],
                                          data['PNPN']))  # PN->PN
np.fill_diagonal(ach_mat, 0.)
data['achmat'] = ach_mat

gaba_mat = np.zeros((n_n, n_n))
gaba_mat[:p_n,
         p_n:] = np.random.choice([0., 1.],
                                  size=(p_n, l_n),
                                  p=(1 - data['LNPN'], data['LNPN']))  # LN->PN
gaba_mat[p_n:,
         p_n:] = np.random.choice([0., 1.],
                                  size=(l_n, l_n),
                                  p=(1 - data['LNLN'], data['LNLN']))  # LN->LN
np.fill_diagonal(gaba_mat, 0.)
data['gabamat'] = gaba_mat

with open('model.pkl', 'wb') as fp:
Пример #51
0
def get_neighb_matrix(coords, resolution=500, size=1):
    D = cdist(coords, coords, 'euclidean')
    lim = np.ceil(size*resolution*np.sqrt(2))
    NM = (D < lim).astype(int)
    np.fill_diagonal(NM, 0)
    return NM
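A hedged usage sketch: with the default resolution of 500 and size 1, only pairs closer than ceil(500 * sqrt(2)) end up connected, and the diagonal stays zero.

import numpy as np
# assumes get_neighb_matrix (and the cdist import it relies on) from the example above

coords = np.array([[0.0, 0.0], [500.0, 500.0], [5000.0, 5000.0]])
NM = get_neighb_matrix(coords)
print(NM)
# [[0 1 0]
#  [1 0 0]
#  [0 0 0]]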
Пример #52
0
def diagadd(m, v):
    """Add a vector to the diagonal of a matrix"""
    np.fill_diagonal(m, m.diagonal() + v)
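A quick hedged usage sketch; note that diagadd mutates its argument in place and returns None.

import numpy as np
# assumes diagadd from the example above

m = np.zeros((3, 3))
diagadd(m, np.array([1.0, 2.0, 3.0]))
print(np.diag(m))   # [1. 2. 3.]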
Пример #53
0
def _linprog_simplex(c,
                     A_ub=None,
                     b_ub=None,
                     A_eq=None,
                     b_eq=None,
                     bounds=None,
                     maxiter=1000,
                     disp=False,
                     callback=None,
                     tol=1.0E-12,
                     bland=False,
                     **unknown_options):
    """
    Solve the following linear programming problem via a two-phase
    simplex algorithm.

    maximize:     c^T * x

    subject to:   A_ub * x <= b_ub
                  A_eq * x == b_eq

    Parameters
    ----------
    c : array_like
        Coefficients of the linear objective function to be maximized.
    A_ub : array_like
        2-D array which, when matrix-multiplied by x, gives the values of the
        upper-bound inequality constraints at x.
    b_ub : array_like
        1-D array of values representing the upper-bound of each inequality
        constraint (row) in A_ub.
    A_eq : array_like
        2-D array which, when matrix-multiplied by x, gives the values of the
        equality constraints at x.
    b_eq : array_like
        1-D array of values representing the RHS of each equality constraint
        (row) in A_eq.
    bounds : array_like
        The bounds for each independent variable in the solution, which can take
        one of three forms::
        None : The default bounds, all variables are non-negative.
        (lb, ub) : If a 2-element sequence is provided, the same
                  lower bound (lb) and upper bound (ub) will be applied
                  to all variables.
        [(lb_0, ub_0), (lb_1, ub_1), ...] : If an n x 2 sequence is provided,
                  each variable x_i will be bounded by lb[i] and ub[i].
        Infinite bounds are specified using -np.inf (negative)
        or np.inf (positive).
    callback : callable
        If a callback function is provided, it will be called within each
        iteration of the simplex algorithm. The callback must have the
        signature `callback(xk, **kwargs)` where xk is the current solution
        vector and kwargs is a dictionary containing the following::
        "tableau" : The current Simplex algorithm tableau
        "nit" : The current iteration.
        "pivot" : The pivot (row, column) used for the next iteration.
        "phase" : Whether the algorithm is in Phase 1 or Phase 2.
        "bv" : A structured array containing a string representation of each
               basic variable and its current value.

    Options
    -------
    maxiter : int
       The maximum number of iterations to perform.
    disp : bool
        If True, print exit status message to sys.stdout
    tol : float
        The tolerance which determines when a solution is "close enough" to zero
        in Phase 1 to be considered a basic feasible solution or close enough
        to positive to serve as an optimal solution.
    bland : bool
        If True, use Bland's anti-cycling rule [3] to choose pivots to
        prevent cycling.  If False, choose pivots which should lead to a
        converged solution more quickly.  The latter method is subject to
        cycling (non-convergence) in rare instances.

    Returns
    -------
    A scipy.optimize.OptimizeResult consisting of the following fields::
        x : ndarray
            The independent variable vector which optimizes the linear
            programming problem.
        slack : ndarray
            The values of the slack variables.  Each slack variable corresponds
            to an inequality constraint.  If the slack is zero, then the
            corresponding constraint is active.
        success : bool
            Returns True if the algorithm succeeded in finding an optimal
            solution.
        status : int
            An integer representing the exit status of the optimization::
             0 : Optimization terminated successfully
             1 : Iteration limit reached
             2 : Problem appears to be infeasible
             3 : Problem appears to be unbounded
        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.

    Examples
    --------
    Consider the following problem:

    Minimize: f = -1*x[0] + 4*x[1]

    Subject to: -3*x[0] + 1*x[1] <= 6
                 1*x[0] + 2*x[1] <= 4
                            x[1] >= -3

    where:  -inf <= x[0] <= inf

    This problem deviates from the standard linear programming problem.  In
    standard form, linear programming problems assume the variables x are
    non-negative.  Since the variables don't have standard bounds where
    0 <= x <= inf, the bounds of the variables must be explicitly set.

    There are two upper-bound constraints, which can be expressed as

    dot(A_ub, x) <= b_ub

    The input for this problem is as follows:

    >>> from scipy.optimize import linprog
    >>> c = [-1, 4]
    >>> A = [[-3, 1], [1, 2]]
    >>> b = [6, 4]
    >>> x0_bnds = (None, None)
    >>> x1_bnds = (-3, None)
    >>> res = linprog(c, A, b, bounds=(x0_bnds, x1_bnds))
    >>> print(res)
         fun: -22.0
     message: 'Optimization terminated successfully.'
         nit: 1
       slack: array([ 39.,   0.])
      status: 0
     success: True
           x: array([ 10.,  -3.])

    References
    ----------
    .. [1] Dantzig, George B., Linear programming and extensions. Rand
           Corporation Research Study Princeton Univ. Press, Princeton, NJ, 1963
    .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
           Mathematical Programming", McGraw-Hill, Chapter 4.
    .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
           Mathematics of Operations Research (2), 1977: pp. 103-107.
    """
    _check_unknown_options(unknown_options)

    status = 0
    messages = {
        0: "Optimization terminated successfully.",
        1: "Iteration limit reached.",
        2: "Optimization failed. Unable to find a feasible"
        " starting point.",
        3: "Optimization failed. The problem appears to be unbounded.",
        4: "Optimization failed. Singular matrix encountered."
    }
    have_floor_variable = False

    cc = np.asarray(c)

    # The initial value of the objective function element in the tableau
    f0 = 0

    # The number of variables as given by c
    n = len(c)

    # Convert the input arguments to arrays (sized to zero if not provided)
    Aeq = np.asarray(A_eq) if A_eq is not None else np.empty([0, len(cc)])
    Aub = np.asarray(A_ub) if A_ub is not None else np.empty([0, len(cc)])
    beq = np.ravel(np.asarray(b_eq)) if b_eq is not None else np.empty([0])
    bub = np.ravel(np.asarray(b_ub)) if b_ub is not None else np.empty([0])

    # Analyze the bounds and determine what modifications to be made to
    # the constraints in order to accommodate them.
    L = np.zeros(n, dtype=np.float64)
    U = np.ones(n, dtype=np.float64) * np.inf
    if bounds is None or len(bounds) == 0:
        pass
    elif len(bounds) == 2 and not hasattr(bounds[0], '__len__'):
        # All bounds are the same
        a = bounds[0] if bounds[0] is not None else -np.inf
        b = bounds[1] if bounds[1] is not None else np.inf
        L = np.asarray(n * [a], dtype=np.float64)
        U = np.asarray(n * [b], dtype=np.float64)
    else:
        if len(bounds) != n:
            status = -1
            message = ("Invalid input for linprog with method = 'simplex'.  "
                       "Length of bounds is inconsistent with the length of c")
        else:
            try:
                for i in range(n):
                    if len(bounds[i]) != 2:
                        raise IndexError()
                    L[i] = bounds[i][0] if bounds[i][0] is not None else -np.inf
                    U[i] = bounds[i][1] if bounds[i][1] is not None else np.inf
            except IndexError:
                status = -1
                message = ("Invalid input for linprog with "
                           "method = 'simplex'.  bounds must be a n x 2 "
                           "sequence/array where n = len(c).")

    if np.any(L == -np.inf):
        # If any lower-bound constraint is a free variable
        # add the first column variable as the "floor" variable which
        # accommodates the most negative variable in the problem.
        n = n + 1
        L = np.concatenate([np.array([0]), L])
        U = np.concatenate([np.array([np.inf]), U])
        cc = np.concatenate([np.array([0]), cc])
        Aeq = np.hstack([np.zeros([Aeq.shape[0], 1]), Aeq])
        Aub = np.hstack([np.zeros([Aub.shape[0], 1]), Aub])
        have_floor_variable = True

    # Now before we deal with any variables with lower bounds < 0,
    # deal with finite bounds which can be simply added as new constraints.
    # Also validate bounds inputs here.
    for i in range(n):
        if (L[i] > U[i]):
            status = -1
            message = ("Invalid input for linprog with method = 'simplex'.  "
                       "Lower bound %d is greater than upper bound %d" %
                       (i, i))

        if np.isinf(L[i]) and L[i] > 0:
            status = -1
            message = ("Invalid input for linprog with method = 'simplex'.  "
                       "Lower bound may not be +infinity")

        if np.isinf(U[i]) and U[i] < 0:
            status = -1
            message = ("Invalid input for linprog with method = 'simplex'.  "
                       "Upper bound may not be -infinity")

        if np.isfinite(L[i]) and L[i] > 0:
            # Add a new lower-bound (negative upper-bound) constraint
            Aub = np.vstack([Aub, np.zeros(n)])
            Aub[-1, i] = -1
            bub = np.concatenate([bub, np.array([-L[i]])])
            L[i] = 0

        if np.isfinite(U[i]):
            # Add a new upper-bound constraint
            Aub = np.vstack([Aub, np.zeros(n)])
            Aub[-1, i] = 1
            bub = np.concatenate([bub, np.array([U[i]])])
            U[i] = np.inf

    # Now find negative lower bounds (finite or infinite) which require a
    # change of variables or free variables and handle them appropriately
    for i in range(0, n):
        if L[i] < 0:
            if np.isfinite(L[i]) and L[i] < 0:
                # Add a change of variables for x[i]
                # For each row in the constraint matrices, we take the
                # coefficient from column i in A,
                # and subtract the product of that and L[i] to the RHS b
                beq = beq - Aeq[:, i] * L[i]
                bub = bub - Aub[:, i] * L[i]
                # We now have a nonzero initial value for the objective
                # function as well.
                f0 = f0 - cc[i] * L[i]
            else:
                # This is an unrestricted variable, let x[i] = u[i] - v[0]
                # where v is the first column in all matrices.
                Aeq[:, 0] = Aeq[:, 0] - Aeq[:, i]
                Aub[:, 0] = Aub[:, 0] - Aub[:, i]
                cc[0] = cc[0] - cc[i]

        if np.isinf(U[i]):
            if U[i] < 0:
                status = -1
                message = ("Invalid input for linprog with "
                           "method = 'simplex'.  Upper bound may not be -inf.")

    # The number of upper bound constraints (rows in A_ub and elements in b_ub)
    mub = len(bub)

    # The number of equality constraints (rows in A_eq and elements in b_eq)
    meq = len(beq)

    # The total number of constraints
    m = mub + meq

    # The number of slack variables (one for each of the upper-bound constraints)
    n_slack = mub

    # The number of artificial variables (one for each lower-bound and equality
    # constraint)
    n_artificial = meq + np.count_nonzero(bub < 0)

    try:
        Aub_rows, Aub_cols = Aub.shape
    except ValueError:
        raise ValueError("Invalid input.  A_ub must be two-dimensional")

    try:
        Aeq_rows, Aeq_cols = Aeq.shape
    except ValueError:
        raise ValueError("Invalid input.  A_eq must be two-dimensional")

    if Aeq_rows != meq:
        status = -1
        message = ("Invalid input for linprog with method = 'simplex'.  "
                   "The number of rows in A_eq must be equal "
                   "to the number of values in b_eq")

    if Aub_rows != mub:
        status = -1
        message = ("Invalid input for linprog with method = 'simplex'.  "
                   "The number of rows in A_ub must be equal "
                   "to the number of values in b_ub")

    if Aeq_cols > 0 and Aeq_cols != n:
        status = -1
        message = ("Invalid input for linprog with method = 'simplex'.  "
                   "Number of columns in A_eq must be equal "
                   "to the size of c")

    if Aub_cols > 0 and Aub_cols != n:
        status = -1
        message = ("Invalid input for linprog with method = 'simplex'.  "
                   "Number of columns in A_ub must be equal to the size of c")

    if status != 0:
        # Invalid inputs provided
        raise ValueError(message)

    # Create the tableau
    T = np.zeros([m + 2, n + n_slack + n_artificial + 1])

    # Insert objective into tableau
    T[-2, :n] = cc
    T[-2, -1] = f0

    b = T[:-2, -1]

    if meq > 0:
        # Add Aeq to the tableau
        T[:meq, :n] = Aeq
        # Add beq to the tableau
        b[:meq] = beq
    if mub > 0:
        # Add Aub to the tableau
        T[meq:meq + mub, :n] = Aub
        # Add bub to the tableau
        b[meq:meq + mub] = bub
        # Add the slack variables to the tableau
        np.fill_diagonal(T[meq:m, n:n + n_slack], 1)

    # Further set up the tableau.
    # If a row corresponds to an equality constraint or a negative b (a lower
    # bound constraint), then an artificial variable is added for that row.
    # Also, if b is negative, first flip the signs in that constraint.
    slcount = 0
    avcount = 0
    basis = np.zeros(m, dtype=int)
    r_artificial = np.zeros(n_artificial, dtype=int)
    for i in range(m):
        if i < meq or b[i] < 0:
            # basic variable i is in column n+n_slack+avcount
            basis[i] = n + n_slack + avcount
            r_artificial[avcount] = i
            avcount += 1
            if b[i] < 0:
                b[i] *= -1
                T[i, :-1] *= -1
            T[i, basis[i]] = 1
            T[-1, basis[i]] = 1
        else:
            # basic variable i is in column n+slcount
            basis[i] = n + slcount
            slcount += 1

    # Make the artificial variables basic feasible variables by subtracting
    # each row with an artificial variable from the Phase 1 objective
    for r in r_artificial:
        T[-1, :] = T[-1, :] - T[r, :]

    nit1, status = _solve_simplex(T,
                                  n,
                                  basis,
                                  phase=1,
                                  callback=callback,
                                  maxiter=maxiter,
                                  tol=tol,
                                  bland=bland)

    # if pseudo objective is zero, remove the last row from the tableau and
    # proceed to phase 2
    if abs(T[-1, -1]) < tol:
        # Remove the pseudo-objective row from the tableau
        T = T[:-1, :]
        # Remove the artificial variable columns from the tableau
        T = np.delete(T, np.s_[n + n_slack:n + n_slack + n_artificial], 1)
    else:
        # Failure to find a feasible starting point
        status = 2

    if status != 0:
        message = messages[status]
        if disp:
            print(message)
        return OptimizeResult(x=np.nan,
                              fun=-T[-1, -1],
                              nit=nit1,
                              status=status,
                              message=message,
                              success=False)

    # Phase 2
    nit2, status = _solve_simplex(T,
                                  n,
                                  basis,
                                  maxiter=maxiter - nit1,
                                  phase=2,
                                  callback=callback,
                                  tol=tol,
                                  nit0=nit1,
                                  bland=bland)

    solution = np.zeros(n + n_slack + n_artificial)
    solution[basis[:m]] = T[:m, -1]
    x = solution[:n]
    slack = solution[n:n + n_slack]

    # For those variables with finite negative lower bounds,
    # reverse the change of variables
    masked_L = np.ma.array(L, mask=np.isinf(L), fill_value=0.0).filled()
    x = x + masked_L

    # For those variables with infinite negative lower bounds,
    # take x[i] as the difference between x[i] and the floor variable.
    if have_floor_variable:
        for i in range(1, n):
            if np.isinf(L[i]):
                x[i] -= x[0]
        x = x[1:]

    # Optimization complete at this point
    obj = -T[-1, -1]

    if status in (0, 1):
        if disp:
            print(messages[status])
            print("         Current function value: {0: <12.6f}".format(obj))
            print("         Iterations: {0:d}".format(nit2))
    else:
        if disp:
            print(messages[status])
            print("         Iterations: {0:d}".format(nit2))

    return OptimizeResult(x=x,
                          fun=obj,
                          nit=int(nit2),
                          status=status,
                          slack=slack,
                          message=messages[status],
                          success=(status == 0))
Пример #54
0
def _check_counts_matrix(counts,
                         lengths,
                         ploidy,
                         exclude_zeros=False,
                         chrom_subset_index=None):
    """Check counts dimensions, reformat, & excise selected chromosomes.
    """

    if chrom_subset_index is not None and len(chrom_subset_index) / max(
            counts.shape) not in (1, 2):
        raise ValueError(
            "chrom_subset_index size (%d) does not fit counts"
            " shape (%d, %d)." %
            (len(chrom_subset_index), counts.shape[0], counts.shape[1]))
    if len(counts.shape) != 2:
        raise ValueError(
            "Counts matrix must be two-dimensional, current shape = (%s)" %
            ', '.join([str(x) for x in counts.shape]))
    if any([x > lengths.sum() * ploidy for x in counts.shape]):
        raise ValueError(
            "Counts matrix shape (%d, %d) is greater than number"
            " of beads (%d) in %s genome." %
            (counts.shape[0], counts.shape[1], lengths.sum() * ploidy, {
                1: "haploid",
                2: "diploid"
            }[ploidy]))
    if any([x / lengths.sum() not in (1, 2) for x in counts.shape]):
        raise ValueError(
            "Counts matrix shape (%d, %d) does not match lenghts"
            " (%s)" %
            (counts.shape[0], counts.shape[1], ",".join(map(str, lengths))))

    empty_val = 0
    torm = np.full((max(counts.shape)), False)
    if not exclude_zeros:
        empty_val = np.nan
        torm = find_beads_to_remove(counts, max(counts.shape))
        counts = counts.astype(float)

    if sparse.issparse(counts) or isinstance(counts, CountsMatrix):
        counts = counts.toarray()
    if not isinstance(counts, np.ndarray):
        counts = np.array(counts)

    if not np.array_equal(counts[~np.isnan(counts)],
                          counts[~np.isnan(counts)].round()):
        warn("Counts matrix must only contain integers or NaN")

    if counts.shape[0] == counts.shape[1]:
        counts[np.tril_indices(counts.shape[0])] = empty_val
        counts[torm, :] = empty_val
        counts[:, torm] = empty_val
        if chrom_subset_index is not None:
            counts = counts[
                chrom_subset_index[:counts.
                                   shape[0]], :][:,
                                                 chrom_subset_index[:counts.
                                                                    shape[1]]]
    elif min(counts.shape) * 2 == max(counts.shape):
        homo1 = counts[:min(counts.shape), :min(counts.shape)]
        homo2 = counts[counts.shape[0] - min(counts.shape):,
                       counts.shape[1] - min(counts.shape):]
        if counts.shape[0] == min(counts.shape):
            homo1 = homo1.T
            homo2 = homo2.T
        np.fill_diagonal(homo1, empty_val)
        np.fill_diagonal(homo2, empty_val)
        homo1[:,
              torm[:min(counts.shape)] | torm[min(counts.shape):]] = empty_val
        homo2[:,
              torm[:min(counts.shape)] | torm[min(counts.shape):]] = empty_val
        # axis=0 is vertical concat
        counts = np.concatenate([homo1, homo2], axis=0)
        counts[torm, :] = empty_val
        if chrom_subset_index is not None:
            counts = counts[
                chrom_subset_index[:counts.
                                   shape[0]], :][:,
                                                 chrom_subset_index[:counts.
                                                                    shape[1]]]
    else:
        raise ValueError("Input counts matrix is - %d by %d. Counts must be"
                         " n-by-n or n-by-2n or 2n-by-2n." %
                         (counts.shape[0], counts.shape[1]))

    if exclude_zeros:
        counts[np.isnan(counts)] = 0
        counts = sparse.coo_matrix(counts)

    return counts
    def implementCircuitModel(self, initConditions, circuitParams, model, time,
                              dx, eps, frequency, delta_phi0):
        epsilonR = circuitParams.epsilonR
        epsilonOx = circuitParams.epsilonOx
        epsilonStern = circuitParams.epsilonStern
        C0 = circuitParams.C0
        Cox = circuitParams.Cox
        Cstern = circuitParams.Cstern
        #CB=circuitParams.CB
        R = circuitParams.R

        cInit = initConditions.cInit
        c0 = cInit[0] / 2
        q0Init = initConditions.q0Init
        v0Init = initConditions.v0Init
        vOxInit = initConditions.vOxInit
        vSternInit = initConditions.vSternInit
        vBInit = initConditions.vBInit
        c0Init = initConditions.c0Init
        R0Init = initConditions.R0Init

        simulationType = model.simulationType
        Normal = model.NormalCircuit
        Reduced = model.ReducedCircuit
        circuitModel = model.circuitModel
        LinCapModel = model.LinCapModel
        NonLinCapModel = model.NonLinCapModel
        NonLinCapVarResModel = model.NonLinCapVarResModel
        NonLinCapInhomResModel = model.NonLinCapInhomResModel
        NonLinCapVarResROxModel = model.NonLinCapVarResROxModel

        if circuitModel == LinCapModel:
            iInit = ([
                0, (delta_phi0 - (v0Init + vOxInit + vSternInit)) / R, v0Init,
                vOxInit, vSternInit,
                delta_phi0 - (v0Init + vOxInit + vSternInit)
            ])
            i = odeint(circuitModel_LinCap,
                       iInit,
                       time,
                       args=(R, C0, Cox, Cstern, frequency, delta_phi0))
            charge = i[:, 0]
            current = i[:, 1]
            vEdl = i[:, 2]
            vOx = i[:, 3]
            vStern = i[:, 4]
            vBulk = i[:, -1]
            m = int(1. / dx)  # number of spatial grid points (unit-length domain)
            c = 2 * np.ones((len(vBulk), m))
            # total ion content: trapezoidal-rule integral of c over x in [0, 1]
            cTot = (0.5 * (c[:, 0] + c[:, -1]) + np.sum(c[:, 1:-1], axis=1)) * dx

            R = R * np.ones(np.shape(vBulk))
            timeBulk = time
        elif circuitModel == NonLinCapModel:
            iInit = ([
                vOxInit * Cox,
                (delta_phi0 - 2 * (v0Init + vOxInit + vSternInit)) / R, v0Init,
                vOxInit, vSternInit,
                delta_phi0 - 2 * (v0Init + vOxInit + vSternInit)
            ])
            i = odeint(circuitModel_NonLinCap,
                       iInit,
                       time,
                       args=(R, C0, Cox, Cstern, frequency, delta_phi0))
            charge = i[:, 0]
            current = i[:, 1]
            vEdl = i[:, 2]
            vOx = i[:, 3]
            vStern = i[:, 4]
            vBulk = i[:, -1]
            m = int(1. / dx)  # number of spatial grid points (unit-length domain)
            c = 2 * cInit[0] * np.ones((len(vBulk), m))
            # total ion content: trapezoidal-rule integral of c over x in [0, 1]
            cTot = (0.5 * (c[:, 0] + c[:, -1]) + np.sum(c[:, 1:-1], axis=1)) * dx
            R = R * np.ones(np.shape(vBulk))
            timeBulk = time
        elif circuitModel == NonLinCapVarResModel:
            #            #nFreq=len(frequency)
            #            iInit=([q0Init,(vBInit-(v0Init+vOxInit+vSternInit))/R,v0Init,vOxInit,vSternInit,c0,R,vBInit-(v0Init+vOxInit+vSternInit)])
            #            iInit=np.append(q0Init,(vBInit-(v0Init+vOxInit+vSternInit))/R)#([0,delta_phi0/R,0,R,cInit,eps,delta_phi0])
            #            iInit=np.append(iInit,v0Init)
            #            iInit=np.append(iInit,vOxInit)
            #            iInit=np.append(iInit,vSternInit)
            #            iInit=np.append(iInit,c0Init)
            #            iInit=np.append(iInit,R0Init)
            #            iInit=np.append(iInit,vBInit-(v0Init+vOxInit+vSternInit))
            iInit = ([
                q0Init, (vBInit - (v0Init + vOxInit + vSternInit)) / R, v0Init,
                vOxInit, vSternInit, c0, R,
                vBInit - (v0Init + vOxInit + vSternInit)
            ])
            #            charge,current,vEdl,vOx,vStern,R,c,vBulk=odeint(circuitModel_NonLinCap_VarRes,iInit,time,args=(C0,Cox,Cstern,frequency,delta_phi0));
            i = odeint(circuitModel_NonLinCap_VarRes,
                       iInit,
                       time,
                       args=(C0, Cox, Cstern, frequency, delta_phi0))
            #print("i",i,np.shape(i))
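            # Unpack the odeint solution columns in the same order as iInit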
            [charge, current, vEdl, vOx, vStern, c, R,
             vBulk] = [i[:, m] for m in range(len(iInit))]

            #            charge=i[:,0]
            #            current=i[:,1]
            #            vEdl=i[:,2]
            #            vOx=i[:,3]
            #            vStern=i[:,4]
            #            R=i[:,5]
            #            c=i[:,6]
            #            vBulk=i[:,-1]
            #            m=int(1./dx);
            #            c=np.multiply(np.reshape(c,(len(vBulk),1)),np.matrix(np.ones((len(vBulk),m))))
            #            c=2*c
            #            cTot=(0.5*(c[:,0]+c[:,-1])+np.sum(c[:,1:-1],axis=1))*dx
            c = 2 * c
            cTot = np.array([2.0])
            #print("time",time,len(time))
            timeBulk = time
            #plt.plot(time,R)
            # plt.plot(time,vBulk)
        elif circuitModel == NonLinCapVarResROxModel:
            ROx = circuitParams.ROx
            iInit = ([
                q0Init, (vBInit - (v0Init + vOxInit + vSternInit)) / R, v0Init,
                vOxInit, vSternInit, c0, R,
                vBInit - (v0Init + vOxInit + vSternInit)
            ])
            #iInit=([0,(vBInit-(v0Init+vOxInit+vSternInit))/R,v0Init,vOxInit,vSternInit,c0,R,vBInit-(v0Init+vOxInit+vSternInit)])
            i = odeint(circuitModelNonLinCapROxVarRes,
                       iInit,
                       time,
                       args=(C0, Cox, Cstern, ROx, frequency, delta_phi0))
            [charge, current, vEdl, vOx, vStern, c, R,
             vBulk] = [i[:, m] for m in range(len(iInit))]
            #            charge=i[:,0]
            #            current=i[:,1]
            #            vEdl=i[:,2]
            #            vOx=i[:,3]
            #            vStern=i[:,4]
            #            R=i[:,5]
            #            c=i[:,6]
            #            vBulk=i[:,-1]
            #            m=int(1./dx);
            #            c=np.multiply(np.reshape(c,(len(vBulk),1)),np.matrix(np.ones((len(vBulk),m))))
            c = 2 * c
            cTot = np.array([2.0])
            #(0.5*(c[:,0]+c[:,-1])+np.sum(c[:,1:-1],axis=1))*dx
            timeBulk = time
        elif circuitModel == NonLinCapInhomResModel:
            m = int(1.0 / dx)
            n = int(1.0 / dx)
            cInit = (2 * c0 * np.ones((m, 1)))
            x = np.linspace(0, 1, m)
            diags = np.asarray([1.0, 2.0, 1.0])
            A = np.zeros((n, m), diags.dtype)
            b = np.zeros((n, 1), diags.dtype)
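            # Assemble the 1-D second-derivative operator: filling the diagonal
            # of the shifted views A[1:-1, z:] writes the (1, -2, 1) stencil
            # into every interior row; the first and last rows below implement
            # the boundary conditions, with the boundary forcing carried in b.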
            for z, v in enumerate((1.0, -2.0, 1.0)):
                np.fill_diagonal(A[1:-1, z:], v)
            A[0, 0] = -2.0
            A[0, 1] = 2.0
            A[-1, -1] = -2.0
            A[-1, -2] = 2.0
            A = A / (dx**2)
            A = np.matrix(A)
            b[0] = -2
            b[-1] = -2
            iInit = np.append(0, (vBInit - (v0Init + vOxInit + vSternInit)) /
                              R)  #([0,delta_phi0/R,0,R,cInit,eps,delta_phi0])
            iInit = np.append(iInit, v0Init)
            iInit = np.append(iInit, vOxInit)
            iInit = np.append(iInit, vSternInit)
            iInit = np.append(iInit, R)
            iInit = np.append(iInit, cInit)
            iInit = np.append(iInit, vBInit - (v0Init + vOxInit + vSternInit))
            #print iInit
            i = odeint(circuitModel_NonLinCap_InhomResPython,
                       iInit,
                       time,
                       args=(dx, eps, C0, Cox, Cstern, frequency, delta_phi0,
                             A, b))
            charge = i[:, 0]
            current = i[:, 1]
            vEdl = i[:, 2]
            vOx = i[:, 3]
            vStern = i[:, 4]
            R = i[:, 5]
            vBulk = i[:, -1]
            c = i[:, 6:-1]
            cTot = (0.5 *
                    (c[:, 0] + c[:, -1]) + np.sum(c[:, 1:-1], axis=1)) * dx
            timeBulk = time
        return [
            charge, current, vEdl, vOx, vStern, c, cTot, R, vBulk, timeBulk
        ]
# Q2
# Jacobi eigenvalue (rotation) method for a symmetric matrix
import numpy as np

A = np.array([[8, -1, 3, -1], [-1, 6, 2, 0], [3, 2, 9, 1], [-1, 0, 1, 7]],
             dtype=float)

## Sign function (returns +1 at x = 0, unlike np.sign)
def sign(x):
    if x >= 0:
        return 1
    else:
        return -1

# Convergence tolerance on the largest off-diagonal entry
# (added here; the original snippet had no stopping test).
tol = 1e-10

while True:
    # Locate the largest off-diagonal element A[p, q]
    mask = np.ones(A.shape, dtype=bool)
    np.fill_diagonal(mask, 0)
    off_max = abs(A[mask]).max()
    if off_max < tol:
        break
    idx = np.where((abs(A) == off_max) & mask)
    p, q = idx[0][0], idx[1][0]

    # Rotation that annihilates A[p, q]
    rho = 0.5 * (A[q][q] - A[p][p]) / A[p][q]
    t = sign(rho) / (abs(rho) + np.sqrt(rho * rho + 1))
    c = 1. / np.sqrt(1 + t * t)
    s = c * t

    ## Constructing the rotated matrix D
    D = A.copy()
    D[p][q] = 0
    D[q][p] = 0
    D[p][p] = c * c * A[p][p] + s * s * A[q][q] - 2 * c * s * A[p][q]
    D[q][q] = s * s * A[p][p] + c * c * A[q][q] + 2 * c * s * A[p][q]
    # Apply the rotation to the remaining entries of rows/columns p and q
    for k in range(A.shape[0]):
        if k != p and k != q:
            D[k][p] = D[p][k] = c * A[k][p] - s * A[k][q]
            D[k][q] = D[q][k] = s * A[k][p] + c * A[k][q]
    A = D
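
# A quick sanity check one might append after the loop (not part of the
# original snippet): after convergence the diagonal of A holds the eigenvalues
# of the starting matrix, which is rebuilt here from the same literal so it can
# be compared against numpy.linalg.eigvalsh.
A0 = np.array([[8, -1, 3, -1], [-1, 6, 2, 0], [3, 2, 9, 1], [-1, 0, 1, 7]],
              dtype=float)
print(np.sort(np.diag(A)))     # eigenvalues found by the Jacobi rotations
print(np.linalg.eigvalsh(A0))  # LAPACK reference, in ascending order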
Example #57
0
# Imports assumed for this excerpt (they are not shown in the original snippet)
import numpy
from matplotlib import pyplot as plt
from nilearn import datasets, plotting
from nilearn.connectome import ConnectivityMeasure

atlas = datasets.fetch_atlas_aal()
# Loading atlas image stored in 'maps'
atlas_filename = atlas['maps']
# Loading atlas data stored in 'labels'
labels = atlas['labels']

labels = brain_roi_labels_

############################################################################
# Build and display a correlation matrix
correlation_measure = ConnectivityMeasure(kind='correlation')
correlation_matrix = correlation_measure.fit_transform([time_series_])[0]

plt.figure(figsize=(10, 10))
# Mask out the major diagonal
numpy.fill_diagonal(correlation_matrix, 0)
plt.imshow(correlation_matrix,
           interpolation="nearest",
           cmap="RdBu_r",
           vmax=0.8,
           vmin=-0.8)
plt.colorbar()
# And display the labels
x_ticks = plt.xticks(range(len(labels)), labels, rotation=90)
y_ticks = plt.yticks(range(len(labels)), labels)

#coords = atlas.region_coords

# We threshold to keep only the 20% of edges with the highest value
# because the graph is very dense
plotting.plot_connectome(correlation_matrix,
Example #58
0
def createMPLengthThreeV2(pDict, pDir, verbosity):
    # Get the list of the primary path matrices
    pNames = list(pDict.keys())
    pNames.sort()
    pNum = len(pNames)

    # Get the list of all metapaths created thus far
    mDict = readKeyFilePP(pDir)

    # Set the number from which to start naming matrices
    mNum = NChooseK(pNum, 2) + (2 * pNum)

    # Check if all expected 3-step paths were created
    #	exit if they are
    mNumExpected = math.pow(pNum, 3)
    mp3Count = 0
    for mpName in list(mDict.keys()):
        if mpName.count('-') == 2:
            mp3Count += 1
    # end loop
    if mp3Count == mNumExpected:
        if verbosity:
            print("All 3-length paths already computed ...")
        return
    elif mp3Count >= mNumExpected:
        print("ERROR: Uh-oh, more 3-length paths than expected already exist!")
        sys.exit()
    # end if

    # Get the matrix dimensions from genes.txt file
    sizeOf = 0
    pDirAlt = pDir
    if pDirAlt.endswith("_MetaPaths/"):
        pDirAlt = pDirAlt[:-11] + '/'
    with open(pDirAlt + 'genes.txt') as fin:
        for line in fin:
            sizeOf += 1
    # end with

    # Build list of 2-step path names
    mp2Names = list()
    for mpName in list(mDict.keys()):
        if mpName.count('-') == 1:
            mp2Names.append(mpName)
    # end loop
    mp2Names.sort()

    # Multiply each matrix pair
    pNameSet = set()
    for p1 in pNames:

        # Load the first matrix
        mFileA = str(pDict[p1]).zfill(FNAME_ZPAD) + MATRIX_EXTENSION
        matrixA = getPathMatrixV2(mFileA, pDir, sizeOf)

        sizeGB = matrixA.nbytes / 1.0e9
        if verbosity > 1:
            print("-- loaded {}, size: {:.3f} GBytes".format(p1, sizeGB))

        for p2 in mp2Names:
            # The name of this path
            name = p1 + '-' + p2
            # The name of the reversed path
            p2v = p2.split('-')
            nameRev = p2v[1] + '-' + p2v[0] + '-' + p1

            # Optionally skipping consecutive edges
            if not KEEP_DOUBLE:
                if p1 == p2:
                    continue
            # end if

            if name in pNameSet:
                continue
            # end if

            # Create new matrix if file doesn't already exist
            newMFName = str(mNum).zfill(FNAME_ZPAD) + MATRIX_EXTENSION

            if not os.path.isfile(pDir + newMFName):
                # Save a placeholder (allows multiple threads, to skip this one)
                fakeMx = np.ones((2, 2))
                saveMatrixNumpyV2(fakeMx, newMFName, pDir, name,
                                  MX_SAVE_AS_INT)

                # Load the second matrix
                mFileB = str(mDict[p2][0]).zfill(FNAME_ZPAD) + MATRIX_EXTENSION
                matrixB = getPathMatrixV2(mFileB, pDir, sizeOf)

                # eliminate loops by removing self-connections
                np.fill_diagonal(matrixA, 0)
                np.fill_diagonal(matrixB, 0)
                # NOTE: Final saved matrix will contain self-connections

                # Multiply the two matrices
                if verbosity > 1:
                    print("-- -- creating matrix {}".format(name))
                newM = np.dot(matrixA, matrixB)

                if verbosity > 1:
                    print("-- -- -- saving matrix {}".format(newMFName))
                saveMatrixNumpyV2(newM, newMFName, pDir, name, MX_SAVE_AS_INT)
            # end if

            # Add the matrix name & number to mDict
            if name == nameRev:  # (ie: typeA-typeA)
                # Then add just this matrix to the list
                mDict[name] = [mNum, False]
                pNameSet.add(name)
            else:
                # Add this path & note the reverse path
                mDict[name] = [mNum, False]
                pNameSet.add(name)
                #	Reverse path uses transpose
                mDict[nameRev] = [mNum, True]
                pNameSet.add(nameRev)
            # end if
            mNum += 1
    # end loop

    saveKeyFile(mDict, pDir)
    return
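
# The essential step in the snippet above is clearing each matrix's diagonal
# before the product, so a node's self-connection cannot serve as the
# intermediate hop of a metapath. A self-contained toy version of that step
# (plain numpy arrays standing in for the matrices getPathMatrixV2 would load):
import numpy as np

edge_type_a = np.array([[1., 1., 0.],
                        [1., 0., 1.],
                        [0., 1., 1.]])
edge_type_b = np.array([[1., 0., 1.],
                        [0., 1., 1.],
                        [1., 1., 0.]])
np.fill_diagonal(edge_type_a, 0)  # remove self-connections
np.fill_diagonal(edge_type_b, 0)
metapath_ab = np.dot(edge_type_a, edge_type_b)
print(metapath_ab)  # [i, j] counts i -> k -> j walks: an a-edge then a b-edge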
Example #59
0
import numpy as np

def compute_distance_matrix(X):
    # Pairwise Euclidean distances via ||xi - xj||^2 = ||xi||^2 + ||xj||^2 - 2*xi.xj
    sq_norms = np.sum(np.square(X), axis=1)
    dists = sq_norms + sq_norms[:, np.newaxis] - 2 * np.dot(X, X.T)
    # abs() guards against tiny negative values from floating-point round-off
    dists = np.sqrt(np.abs(dists))
    np.fill_diagonal(dists, 0)
    return dists
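
# If SciPy is available, the vectorized expansion above can be sanity-checked
# against scipy.spatial.distance.cdist (the test data below is invented for
# the check; numpy is assumed imported as np, as above):
from scipy.spatial.distance import cdist

X_test = np.random.RandomState(0).randn(50, 3)
assert np.allclose(compute_distance_matrix(X_test), cdist(X_test, X_test))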
Example #60
0
    def __init__(self, **kwargs):
        params = {
            'max_steps': 50,
            'dim': 2,
            'num': 1,
            'radius': [1.0],
            'mass': [1.0],
            'pp_collision_law': 'pp_specular',
            'gamma': 'uniform'
        }
        params.update(kwargs)

        if (params['gamma'] == 'uniform'):
            params['gamma'] = np.sqrt(2 / (2 + params['dim']))
        elif (params['gamma'] == 'shell'):
            params['gamma'] = np.sqrt(2 / params['dim'])
        elif (params['gamma'] == 'point'):
            params['gamma'] = 0

        # Each parameter list must be num_particles long.  If not, this will extend by filling with the last entry
        constants = ['radius', 'mass', 'gamma']
        for const in constants:
            c = listify(params[const])  #listify defined at bottom of this file
            for p in range(len(c), params['num']):
                c.append(c[-1])
            params[const] = np.asarray(c).astype(float)

        for key, val in params.items():
            if isinstance(val, list):
                val = np.asarray(val)  # converts lists to arrays
            setattr(self, key, val)
        self.mom_inert = self.mass * (self.gamma * self.radius)**2
        self.get_mesh()

        self.pp_gap_min = cross_subtract(self.radius, -self.radius)
        np.fill_diagonal(self.pp_gap_min,
                         -1)  # no gap between a particle and itself

        # 'wall' is presumably a module-level list of wall objects defined
        # elsewhere in the original file; it is not part of this excerpt
        self.wp_dt = np.zeros([len(wall), self.num], dtype='float')
        self.wp_mask = self.wp_dt.copy().astype(bool)

        if self.pp_collision_law == 'pp_ignore':
            self.pp_dt = np.array([np.inf])
        else:
            self.pp_dt = np.zeros([self.num, self.num], dtype='float')
        self.pp_mask = self.pp_dt.copy().astype(bool)

        self.t = 0.0
        self.cell_offset = np.zeros(
            [self.num, self.dim],
            dtype=int)  # tracks which cell the particle is in
        self.col = {}
        self.t_hist = []
        self.col_hist = []
        self.pos_hist = []
        self.vel_hist = []
        self.spin_hist = []

        # Color particles (helpful for the future when we have many particles)
        cm = plt.cm.gist_rainbow
        idx = np.linspace(0, cm.N - 1, self.num).round().astype(int)
        self.clr = [cm(i) for i in idx]
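
# The cross_subtract helper used for pp_gap_min above is defined elsewhere in
# the original file. A plausible stand-in (an assumption, purely illustrative)
# is an outer difference, so cross_subtract(r, -r)[i, j] == r[i] + r[j],
# matching how the result is used as the minimum particle-particle separation:
import numpy as np

def cross_subtract(a, b):
    # hypothetical reimplementation: outer difference a[i] - b[j]
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    return a[:, np.newaxis] - b[np.newaxis, :]

radius = np.array([1.0, 0.5, 2.0])
pp_gap_min = cross_subtract(radius, -radius)  # r_i + r_j for every pair
np.fill_diagonal(pp_gap_min, -1)  # a particle never collides with itself
print(pp_gap_min)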