Example #1
 def autocor_two_time(self,print_=False,save_=True,filename=None):
     global buf,num,cts,cur,g12, countl        
     global Ndel,Npix
     global time_ind  #generate a time-frame for each level
     global g12x, g12y, g12z #for interpolate
     start_time = time.time()
     buf = zeros([nolev, nobuf, nopixels])  # matrix of buffers for storing images
     cts=zeros(nolev)
     cur=ones(nolev) * nobuf
     countl = array(zeros(  nolev ),dtype='int')        
     g12 =  zeros( [ noframes,noframes, noqs] ) 
     g12x=[]
     g12y=[]
     g12z=[]        
     num= array(zeros(  nolev ),dtype='int')        
     time_ind ={key: [] for key in range(nolev)}         
     ttx=0        
     for n in range(1,noframes +1 ):   ##do the work here
         self.insertimg_twotime(begframe+n-1, print_=print_)
         if  n %(noframes/10) ==0:
             sys.stdout.write("#")
             sys.stdout.flush()                
     for q in range(noqs):            
         x0 =  g12[:,:,q]
         g12[:,:,q] = tril(x0) +  tril(x0).T - diag(diag(x0))            
     elapsed_time = time.time() - start_time
     print 'Total time: %.2f min' %(elapsed_time/60.)
     if save_:
          if filename is None:
              filename = 'g12_-%s-%s_ImgReadMethod_' % (
                  begframe, begframe + noframes - 1) + FOUT

         save(  RES_DIR + filename+FOUT, g12)
         print 'the %s was stored in %s'%(filename,RES_DIR)
     return g12, (elapsed_time/60.)
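The symmetrization step above is a common trick: the loop fills only the lower triangle of each g12[:,:,q], and tril(x0) + tril(x0).T - diag(diag(x0)) mirrors it into a full symmetric matrix without double-counting the diagonal. A minimal self-contained illustration:

import numpy as np

x0 = np.array([[1., 0., 0.],
               [2., 3., 0.],
               [4., 5., 6.]])  # only the lower triangle was filled
# tril + tril.T doubles the diagonal, so subtract it back once
full = np.tril(x0) + np.tril(x0).T - np.diag(np.diag(x0))
print(full)
# [[1. 2. 4.]
#  [2. 3. 5.]
#  [4. 5. 6.]]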
Example #2
def _chol_blocked_fwd(L, Adot, NB=256, inplace=False):
    """
    Forwards-mode differentiation through the Cholesky decomposition
    
    Obtain L_dot from Sigma_dot, where "_dot" means sensitivities in
    forwards-mode differentiation, and Sigma = L @ L.T.

    This version uses a blocked algorithm to update sensitivities Adot
    in place. tril(Adot) should start containing Sigma_dot, and will
    end containing the L_dot. Take tril() of the answer if
    triu(Adot,1) did not start out filled with zeros. Unlike the
    unblocked routine, if the upper triangular part of Adot started
    with non-zero values, some of these will be overwritten.

    If inplace=False, a copy of Adot is modified instead of the
    original. The Adot that was modified is returned.
    """
    if not inplace:
        Adot = Adot.copy()
    for j in range(0, L.shape[0], NB):
        k = min(L.shape[0], j + NB)
        R, D, B, C = _level3partition(L, j, k)
        Rdot, Ddot, Bdot, Cdot = _level3partition(Adot, j, k)
        Ddot[:] = tril(Ddot) - tril(np.dot(Rdot, R.T) + np.dot(R, Rdot.T))
        #chol_unblocked_fwd(D, Ddot, inplace=True) # slow in Python
        Ddot[:] = _chol_symbolic_fwd(D, Ddot + tril(Ddot, -1).T)
        Cdot -= (np.dot(Bdot, R.T) + np.dot(B, Rdot.T))
        #Cdot[:] = (Cdot - [email protected]) @ inv(tril(D)).T
        Cdot[:] = _st(D, Cdot.T - np.dot(Ddot, C.T)).T
    return Adot
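A quick way to sanity-check the forward-mode routine is a central finite difference of numpy's Cholesky along a symmetric perturbation. This is a minimal sketch, assuming _chol_blocked_fwd and its helpers (_level3partition, _chol_symbolic_fwd, _st) are importable from the module above:

import numpy as np

rng = np.random.RandomState(0)
n = 8
A = rng.randn(n, n)
Sigma = A.dot(A.T) + n * np.eye(n)          # well-conditioned SPD test matrix
E = rng.randn(n, n)
Sigma_dot = np.tril(E) + np.tril(E, -1).T   # symmetric perturbation direction

L = np.linalg.cholesky(Sigma)
eps = 1e-6
L_dot_fd = (np.linalg.cholesky(Sigma + eps * Sigma_dot)
            - np.linalg.cholesky(Sigma - eps * Sigma_dot)) / (2 * eps)

L_dot = np.tril(_chol_blocked_fwd(L, Sigma_dot, NB=4))  # assumed in scope
print(np.max(np.abs(L_dot - L_dot_fd)))                 # should be tiny (~1e-9)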
Example #3
def _chol_blocked_rev(L, Abar, NB=256, inplace=False):
    """
    Reverse-mode differentiation through the Cholesky decomposition
    
    Obtain tril(Sigma_bar) from L_bar, where "_bar" means sensitivities
    in reverse-mode differentiation, and Sigma = L @ L.T.

    This version uses a blocked algorithm to update sensitivities Abar
    in place. tril(Abar) should start containing L_bar, and will end
    containing the tril(Sigma_bar). Take tril(Abar) at the end if
    triu(Abar,1) did not start out filled with zeros. Alternatively,
    (tril(Abar) + tril(Abar).T) will give the symmetric, redundant
    matrix of sensitivities.
    
    Unlike the unblocked routine, if the upper triangular part of Abar
    started with non-zero values, some of these will be overwritten.

    If inplace=False, a copy of Abar is modified instead of the
    original. The Abar that was modified is returned.
    """
    if not inplace:
        Abar = Abar.copy()
    for k in range(L.shape[0], 0, -NB):
        j = max(0, k - NB)
        R, D, B, C = _level3partition(L, j, k)
        Rbar, Dbar, Bbar, Cbar = _level3partition(Abar, j, k)
        #Cbar[:] = Cbar @ inv(tril(D))
        Cbar[:] = _st(D, Cbar.T, trans=1).T
        Bbar -= np.dot(Cbar, R)
        Dbar[:] = tril(Dbar) - tril(np.dot(Cbar.T, C))
        #chol_unblocked_rev(D, Dbar, inplace=True) # slow in Python
        Dbar[:] = _chol_symbolic_rev(D, Dbar)
        Rbar -= (np.dot(Cbar.T, B) + np.dot(Dbar + Dbar.T, R))
    return Abar
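Forward and reverse mode can be cross-checked against each other with the usual dot-product test: the directional derivative computed forwards should equal the one recovered from the reverse-mode gradient. A sketch under the same assumption that the two routines above and their helpers are in scope; with the tril conventions described in the docstrings, the two printed numbers should agree up to floating-point error:

import numpy as np

rng = np.random.RandomState(1)
n = 8
A = rng.randn(n, n)
Sigma = A.dot(A.T) + n * np.eye(n)
L = np.linalg.cholesky(Sigma)
E = rng.randn(n, n)
Sigma_dot = np.tril(E) + np.tril(E, -1).T   # symmetric input perturbation
Lbar = np.tril(rng.randn(n, n))             # arbitrary output sensitivities

Ldot = np.tril(_chol_blocked_fwd(L, Sigma_dot, NB=4))
Sigma_bar = np.tril(_chol_blocked_rev(L, Lbar, NB=4))
print(np.sum(Lbar * Ldot))                     # <Lbar, Ldot>
print(np.sum(Sigma_bar * np.tril(Sigma_dot)))  # <Sigma_bar, Sigma_dot>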
Example #4
 def test_al_mohy_higham_2012_experiment_1(self):
     # Matrix square root of a tricky upper triangular matrix.
     A = _get_al_mohy_higham_2012_experiment_1()
     A_sqrtm, info = sqrtm(A, disp=False)
     A_round_trip = A_sqrtm.dot(A_sqrtm)
     assert_allclose(A_round_trip, A, rtol=1e-5)
     assert_allclose(np.tril(A_round_trip), np.tril(A))
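The extra tril assertion is meaningful here, presumably because sqrtm works on the Schur form: the principal square root of an upper triangular matrix is itself upper triangular, so the round trip should reproduce the exact zeros below the diagonal. A tiny worked case:

import numpy as np
from scipy.linalg import sqrtm

A = np.array([[1., 4.],
              [0., 9.]])   # upper triangular
X = sqrtm(A)               # principal root, also upper triangular: [[1, 1], [0, 3]]
print(np.allclose(X.dot(X), A), np.allclose(np.tril(X, -1), 0))   # True True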
Example #5
def q150():
    # Project Euler 150: find the sub-triangle with the minimum sum in a
    # pseudo-randomly generated triangular array of 500500 entries (Python 2)
    x = numpy.arange(2**20, dtype=numpy.int64)
    x = (615949*x + 797807) % 2**20
    s = numpy.empty(500500+1,dtype=numpy.int64)
    t = 0
    s[0] = 0
    for k in xrange(1,500500+1):
        t = x[t]
        s[k] = t - 2**19
    del x
    print s[1:4]
    r,c = numpy.mgrid[:1000,:1000]
    s = s[numpy.tril(r*(r+1)/2 + c + 1)]
    del r,c
    s0 = s
    best = s.min()
    t = s
    p = numpy.zeros((1001,1001), dtype=numpy.int64)
    for i in xrange(1,1000):
        n = s[:-1,:-1] + numpy.tril(t[1:,:-1]) + t[1:,1:] - numpy.tril(p[2:,1:-1])
        #n = s[:-1,:-1] +t[1:,:-1] + t[1:,1:] - p[2:,1:-1]
        print i,best,t[0,0]
        p,t,s = t,n,s[:-1,:-1]
        best = min(best, t.min())
    print best
Example #6
def plot_distance_matrix(distance_csv_file, outfile="similarity_matrix_plot.pdf"):
    """
    plotting distance matrix between organisms 

    the distance between the organisms are calculated based on the difference in their sequence composition 
    """

    distance = pandas.read_csv(distance_csv_file, header=0)
    sim = 1 - distance
    C = numpy.tril(sim)
    N = sim.shape[1]
    C = numpy.ma.masked_array(C, C == 0)

    A = numpy.array([(y, x) for x in range(N, -1, -1) for y in range(N + 1)])
    t = numpy.array([[0.5, 1], [0.5, -1]])
    A = numpy.dot(A, t)
    X = A[:, 1].reshape(N + 1, N + 1)
    Y = A[:, 0].reshape(N + 1, N + 1)
    fig = pylab.figure(figsize=(20,20))
    ax = fig.add_subplot(121, frame_on=False, aspect=2.0)
    ax.set_xticks([])
    ax.set_yticks([])
    caxes = pylab.pcolormesh(X, Y, numpy.flipud(C), axes=ax)
    ax.set_xlim(right=0)
    fig.savefig(outfile, bbox_inches='tight')
Example #7
 def getMechStiffStatistic(self, rangeK, minAA=0, AA='all'):
     """Return number of effective spring constant with set range of
     amino acids of protein structure.
     ``AA`` can be a list with a range of analysed amino acids as:
     [first_aa, last_aa, first_aa2, last_aa2],
     minAA - eliminate amino acids that are within 20aa and
     ``rangeK`` is a list [minK, maxK]"""
     
     model = self.getModel()
     if AA == 'all':
         sm = model.getStiffness()
     elif type(AA) == int:
         sm = model.getStiffness()[0:AA, -AA-1:-1]
     elif type(AA) == list and len(AA) == 1:
         sm = model.getStiffness()[0:AA[0], -AA[0]-1:-1]
     elif type(AA) == list and len(AA) == 4:
         sm = model.getStiffness()[AA[0]:AA[1],AA[2]:AA[3]]
     if minAA > 0:
         sm2 = sm[minAA:-1, 0:-1-minAA]  # matrix without close contacts
         sm3 = np.tril(sm2, k=-1)
         # sort_sm2 = np.sort(np.tril(sm2, k=-1).flatten())
         a = np.where(np.logical_and(sm3 > rangeK[0], sm3 < rangeK[1]))
     if minAA == 0:
         sm2 = np.tril(sm, k=-1)
         a = np.where(np.logical_and(sm2>rangeK[0], sm2<rangeK[1]))
     return len(a[0])
Example #8
 def __init__(self, batch_size, mem_size, hidden_size):
     self.hidden_size = hidden_size
     self.mem_size = mem_size
     self.batch_size = batch_size
     N, M, d = batch_size, mem_size, hidden_size
     self.L = np.tril(np.ones([M, M], dtype='float32'))
     self.sL = np.tril(np.ones([M, M], dtype='float32'), k=-1)
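A small illustration of the two masks built here (with a hypothetical M = 4): np.tril keeps the diagonal while k=-1 drops it, so the two masks differ by exactly the identity. Masks like these typically implement cumulative sums or causal attention over memory slots.

import numpy as np

M = 4
L_mask = np.tril(np.ones([M, M], dtype='float32'))          # inclusive lower triangle
sL_mask = np.tril(np.ones([M, M], dtype='float32'), k=-1)   # strictly lower
print(L_mask - sL_mask)   # the M x M identity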
Example #9
def cmat_for_key(connectome, key, number_of_nodes=None,
                 force_symmetric=True):
    """Return a N x N connection matrix for given connectome and
    key. The connection matrix is returned as a numpy ndarray.

    """

    # create our new shiny connection matrix
    import numpy
    if number_of_nodes is None:
        n = max(connectome.nodes())
    else:
        n = number_of_nodes
    new_cmat = numpy.zeros((n,n))

    # extract the value for key for every edge in the given connectome
    for i,j in connectome.edges_iter():
        new_cmat[i-1][j-1] = connectome[i][j][key]
        
    # do we need to do anything regarding symmetry?
    if force_symmetric and (new_cmat - new_cmat.T != 0).any():
        #...if one-sided (no information below diagonal)
        if (numpy.tril(new_cmat,-1) == 0).all():
            # project above diagonal onto below diagonal
            new_cmat += numpy.tril(new_cmat.T, -1)
        #...else, we will assume two-sided unequal
        else:
            # our solution will be to take the mean of each pair of
            # reflected indices
            new_cmat = (new_cmat + new_cmat.T ) / 2.0

    # return the cmat
    return new_cmat
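A minimal usage sketch, assuming a networkx 1.x graph (edges_iter was removed in networkx 2.0) with 1-indexed nodes whose edges carry the requested key:

import networkx

G = networkx.Graph()
G.add_edge(1, 2, weight=0.5)
G.add_edge(2, 3, weight=2.0)
print(cmat_for_key(G, 'weight', number_of_nodes=3))
# symmetric 3 x 3 array: 0.5 at (0, 1)/(1, 0) and 2.0 at (1, 2)/(2, 1)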
Example #10
	def connectionParameterMatrix(self, parameter):
		if utils.isCallable(parameter):
			matrix = parameter((self.noNodes, self.noNodes)) * self.connections
			matrix = np.tril(matrix) + np.tril(matrix).T # symmetrize from the lower triangle; doubles any nonzero diagonal
		else:
			matrix = parameter * np.ones((self.noNodes, self.noNodes)) * self.connections
		return matrix
Example #11
def test_separate_independent_mok(session_tf):
    """
    We use different independent kernels for each of the output dimensions.
    We can achieve this in two ways:
        1) efficient: SeparateIndependentMok with Shared/SeparateIndependentMof
        2) inefficient: SeparateIndependentMok with InducingPoints
    However, both methods should return the same conditional,
    and after optimization return the same log likelihood.
    """
    # Model 1 (INefficient)
    q_mu_1 = np.random.randn(Data.M * Data.P, 1)
    q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...]  # 1 x MP x MP
    kern_list_1 = [RBF(Data.D, variance=0.5, lengthscales=1.2) for _ in range(Data.P)]
    kernel_1 = mk.SeparateIndependentMok(kern_list_1)
    feature_1 = InducingPoints(Data.X[:Data.M,...].copy())
    m1 = SVGP(Data.X, Data.Y, kernel_1, Gaussian(), feature_1, q_mu=q_mu_1, q_sqrt=q_sqrt_1)
    m1.set_trainable(False)
    m1.q_sqrt.set_trainable(True)
    m1.q_mu.set_trainable(True)
    gpflow.training.ScipyOptimizer().minimize(m1, maxiter=Data.MAXITER)

    # Model 2 (efficient)
    q_mu_2 = np.random.randn(Data.M, Data.P)
    q_sqrt_2 = np.array([np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)])  # P x M x M
    kern_list_2 = [RBF(Data.D, variance=0.5, lengthscales=1.2) for _ in range(Data.P)]
    kernel_2 = mk.SeparateIndependentMok(kern_list_2)
    feature_2 = mf.SharedIndependentMof(InducingPoints(Data.X[:Data.M, ...].copy()))
    m2 = SVGP(Data.X, Data.Y, kernel_2, Gaussian(), feature_2, q_mu=q_mu_2, q_sqrt=q_sqrt_2)
    m2.set_trainable(False)
    m2.q_sqrt.set_trainable(True)
    m2.q_mu.set_trainable(True)
    gpflow.training.ScipyOptimizer().minimize(m2, maxiter=Data.MAXITER)

    check_equality_predictions(session_tf, [m1, m2])
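The np.tril calls when building q_sqrt_1 and q_sqrt_2 are what keep the variational parametrization valid: a lower-triangular factor L always encodes a legitimate covariance S = L L^T. A quick check of that property in isolation:

import numpy as np

L = np.tril(np.random.randn(5, 5))
S = L.dot(L.T)
print(np.allclose(S, S.T))                      # symmetric
print(np.all(np.linalg.eigvalsh(S) >= -1e-12))  # positive semi-definite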
Example #12
        def _test(n):
            """Do an nxn test case worth 5 points."""
            A = self._luTestCase(n)
            L1, U1 = lu(A)
            if not np.allclose(L1.dot(U1), A):
                return _test(n)  # reference factorization failed; retry with a fresh matrix
            stu = s.lu(A.copy())
            try:
                L2, U2 = stu
            except (TypeError, ValueError):
                raise ValueError("lu() failed to return two arrays")

            pts = 5
            if not all(np.allclose(*x) for x in
                       [(np.tril(L2), L2), (np.triu(U2), U2),
                        (L1, L2), (U1, U2), (A, L2.dot(U2))]):
                pts = 2
                self.feedback += "\n\n{}\nA =\n{}".format('- '*20, A)
                if not np.allclose(np.tril(L2), L2):
                    self.feedback += "\nL not lower triangular:\n{}".format(L2)
                    pts -= 1
                if not np.allclose(np.triu(U2), U2):
                    self.feedback += "\nU not upper triangular:\n{}".format(U2)
                    pts -= 1
                pts += self._eqTest(L1, L2, "lu() failed (L incorrect)")
                pts += self._eqTest(U1, U2, "lu() failed (U incorrect)")
                pts += self._eqTest(A, L2.dot(U2), "lu() failed (A != LU)")
            return pts
Example #13
    def hessian(self, x, lagrange, obj_factor):

        H = np.zeros((2*self._m, 2*self._m))
        H[:self._m, :self._m] = np.tril(np.dot(self._A.T, self._A))

        row, col = self.hessianstructure()

        return obj_factor*H[row, col]
Example #14
def vec_to_sym(vec, shape):
    mask = np.tril(np.ones(shape)).astype(bool)
    sym = np.zeros(vec.shape[:-1] + mask.shape, vec.dtype)
    sym[..., mask] = vec
    sym -= (1 - np.sqrt(2))*np.diag(np.diag(sym))
    sym /= np.sqrt(2)
    sym += np.tril(sym, k=-1).T
    return sym
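A short roundtrip illustration, assuming vec_to_sym above is in scope: the masked assignment fills the lower triangle row by row, the sqrt(2) bookkeeping restores the diagonal exactly while rescaling off-diagonal entries, and the last line mirrors the result. This is consistent with a norm-preserving packing in which the inverse operation scales off-diagonal terms up by sqrt(2):

import numpy as np

vec = np.array([1., 2., 3., 4., 5., 6.])
S = vec_to_sym(vec, (3, 3))
print(np.diag(S))        # [1. 3. 6.] -- diagonal entries pass through unchanged
print(S[1, 0], S[0, 1])  # both 2/sqrt(2): off-diagonals are scaled down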
Example #15
def absMDS(distmat, Z, weights, Vp):
	dZ = eucD(Z)
	dZ[dZ==0] = 1E-5
	bZ = Bcalc(weights, distmat, dZ)
	Xu = Vp.dot(bZ).dot(Z)
	dXu = eucD(Xu)
	stress = np.sqrt(np.tril(weights*(distmat-dXu)**2).sum() / np.tril(dXu**2).sum())
	return stress, Xu
Example #16
def test_riemann():
    """Simple test of the Riemann matrix."""
    n = 10
    a = rogues.riemann(n)
    b = np.tril(-np.ones((n, n)), -1)
    c = np.tril(a, -1)
    # Kind of a goofy prop to check, but it's simple
    npt.assert_array_equal(b, c)
Example #17
def run_tril(dtype, shape, order, inplace):
    ac, ag = gen_gpuarray(shape, dtype, order=order, ctx=context)
    result = tril(ag, inplace=inplace)
    assert numpy.all(numpy.tril(ac) == result)
    if inplace:
        assert numpy.all(numpy.tril(ac) == ag)
    else:
        assert numpy.all(ac == ag)
Example #18
def test_compute_epochs_csd():
    """Test computing cross-spectral density from epochs
    """
    epochs, epochs_sin = _get_data()
    # Check that wrong parameters are recognized
    assert_raises(ValueError, compute_epochs_csd, epochs, mode="notamode")
    assert_raises(ValueError, compute_epochs_csd, epochs, fmin=20, fmax=10)
    assert_raises(ValueError, compute_epochs_csd, epochs, fmin=20, fmax=20.1)
    assert_raises(ValueError, compute_epochs_csd, epochs, tmin=0.15, tmax=0.1)
    assert_raises(ValueError, compute_epochs_csd, epochs, tmin=0, tmax=10)
    assert_raises(ValueError, compute_epochs_csd, epochs, tmin=10, tmax=11)

    data_csd_mt = compute_epochs_csd(epochs, mode="multitaper", fmin=8, fmax=12, tmin=0.04, tmax=0.15)
    data_csd_fourier = compute_epochs_csd(epochs, mode="fourier", fmin=8, fmax=12, tmin=0.04, tmax=0.15)

    # Check shape of the CSD matrix
    n_chan = len(data_csd_mt.ch_names)
    assert_equal(data_csd_mt.data.shape, (n_chan, n_chan))
    assert_equal(data_csd_fourier.data.shape, (n_chan, n_chan))

    # Check if the CSD matrix is hermitian
    assert_array_equal(np.tril(data_csd_mt.data).T.conj(), np.triu(data_csd_mt.data))
    assert_array_equal(np.tril(data_csd_fourier.data).T.conj(), np.triu(data_csd_fourier.data))

    # Computing induced power for comparison
    epochs.crop(tmin=0.04, tmax=0.15)
    power, _ = induced_power(epochs.get_data(), epochs.info["sfreq"], [10], n_cycles=0.6)
    power = np.mean(power, 2)

    # Maximum PSD should occur for specific channel
    max_ch_power = power.argmax()
    max_ch_mt = data_csd_mt.data.diagonal().argmax()
    max_ch_fourier = data_csd_fourier.data.diagonal().argmax()
    assert_equal(max_ch_mt, max_ch_power)
    assert_equal(max_ch_fourier, max_ch_power)

    # Maximum CSD should occur for specific channel
    ch_csd_mt = [np.abs(data_csd_mt.data[max_ch_power][i]) if i != max_ch_power else 0 for i in range(n_chan)]
    max_ch_csd_mt = np.argmax(ch_csd_mt)
    ch_csd_fourier = [np.abs(data_csd_fourier.data[max_ch_power][i]) if i != max_ch_power else 0 for i in range(n_chan)]
    max_ch_csd_fourier = np.argmax(ch_csd_fourier)
    assert_equal(max_ch_csd_mt, max_ch_csd_fourier)

    # Check a list of CSD matrices is returned for multiple frequencies within
    # a given range when fsum=False
    csd_fsum = compute_epochs_csd(epochs, mode="fourier", fmin=8, fmax=20, fsum=True)
    csds = compute_epochs_csd(epochs, mode="fourier", fmin=8, fmax=20, fsum=False)
    freqs = [csd.frequencies[0] for csd in csds]

    csd_sum = np.zeros_like(csd_fsum.data)
    for csd in csds:
        csd_sum += csd.data

    assert len(csds) == 2
    assert len(csd_fsum.frequencies) == 2
    assert_array_equal(csd_fsum.frequencies, freqs)
    assert_array_equal(csd_fsum.data, csd_sum)
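The Hermitian check used above is easy to see on a toy matrix: for a Hermitian X, the conjugate transpose of its lower triangle (diagonal included) equals its upper triangle.

import numpy as np

X = np.array([[2., 1 + 1j],
              [1 - 1j, 3.]])   # Hermitian
print(np.array_equal(np.tril(X).T.conj(), np.triu(X)))   # True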
Example #19
def test_tfttr_trttf():
    """
    Test conversion routines between the Rectangular Full Packed (RFP) format
    and the Standard Triangular Array (TR) format
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 20
        if ind > 1:
            A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
            transr = 'C'
        else:
            A_full = (rand(n, n)).astype(dtype)
            transr = 'T'

        trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype)
        A_tf_U, info = trttf(A_full)
        assert_(info == 0)
        A_tf_L, info = trttf(A_full, uplo='L')
        assert_(info == 0)
        A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U')
        assert_(info == 0)
        A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L')
        assert_(info == 0)

        # Create the RFP array manually (n is even!)
        A_tf_U_m = zeros((n+1, n//2), dtype=dtype)
        A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:]
        A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T

        A_tf_L_m = zeros((n+1, n//2), dtype=dtype)
        A_tf_L_m[1:, :] = tril(A_full)[:, :n//2]
        A_tf_L_m[:n//2, :] += tril(A_full)[n//2:, n//2:].conj().T

        assert_array_almost_equal(A_tf_U, A_tf_U_m.reshape(-1, order='F'))
        assert_array_almost_equal(A_tf_U_T,
                                  A_tf_U_m.conj().T.reshape(-1, order='F'))

        assert_array_almost_equal(A_tf_L, A_tf_L_m.reshape(-1, order='F'))
        assert_array_almost_equal(A_tf_L_T,
                                  A_tf_L_m.conj().T.reshape(-1, order='F'))

        # Get the original array from RFP
        A_tr_U, info = tfttr(n, A_tf_U)
        assert_(info == 0)
        A_tr_L, info = tfttr(n, A_tf_L, uplo='L')
        assert_(info == 0)
        A_tr_U_T, info = tfttr(n, A_tf_U_T, transr=transr, uplo='U')
        assert_(info == 0)
        A_tr_L_T, info = tfttr(n, A_tf_L_T, transr=transr, uplo='L')
        assert_(info == 0)

        assert_array_almost_equal(A_tr_U, triu(A_full))
        assert_array_almost_equal(A_tr_U_T, triu(A_full))
        assert_array_almost_equal(A_tr_L, tril(A_full))
        assert_array_almost_equal(A_tr_L_T, tril(A_full))
Example #20
def ratioMDS(distmat, b, Z, weights, Vp):
	dHat = distmat*b
	dZ = eucD(Z)
	dZ[dZ==0] = 1E-5
	bZ = Bcalc(weights, dHat, dZ)
	Xu = Vp.dot(bZ).dot(Z)
	dXu = eucD(Xu)
	stress = np.sqrt(np.tril(weights*(dHat-dXu)**2).sum() / np.tril(dXu**2).sum())
	b = np.tril(weights*distmat*dXu).sum() / np.tril(weights*distmat**2).sum()
	return stress, Xu, b
Example #21
def passed_test(dtype, as_matrix, provide_C, uplo, trans):
    """
    Run one symmetric rank-2k update test.

    Arguments:
        dtype:        either 'float64' or 'float32', the NumPy dtype to test
        as_matrix:    True to test a NumPy matrix, False to test a NumPy ndarray
        provide_C:    True if C is to be provided to the BLASpy function, False otherwise
        uplo:         BLASpy 'uplo' parameter to test
        trans:        BLASpy 'trans' parameter to test

    Returns:
        True if the expected result is within the margin of error of the actual result,
        False otherwise.
    """

    transpose_a = trans == 't' or trans == 'T'
    upper = uplo == 'u' or uplo == 'U'

    # generate random sizes for matrix dimensions
    m_A = randint(N_MIN, N_MAX)
    n_A = randint(N_MIN, N_MAX)
    n = m_A if not transpose_a else n_A

    # create random scalars and matrices to test
    alpha = uniform(SCAL_MIN, SCAL_MAX)
    beta = uniform(SCAL_MIN, SCAL_MAX)
    A = random_matrix(m_A, n_A, dtype, as_matrix)
    B = random_matrix(m_A, n_A, dtype, as_matrix)
    C = random_symmetric_matrix(n, dtype, as_matrix) if provide_C else None

    # create a copy of  C that can be used to calculate the expected result
    C_2 = copy(C) if C is not None else zeros((n, n))

    # compute the expected result
    if not transpose_a:
        C_2 = (beta * C_2) + (alpha * dot(A, B.T)) + (alpha * dot(B, A.T))
    else:
        C_2 = (beta * C_2) + (alpha * dot(A.T, B)) + (alpha * dot(B.T, A))

    # ensure C and C_2 are upper or lower triangular representations of symmetric matrices
    if upper:
        C_2 = triu(C_2)
        if provide_C:
            C = triu(C)
    else:
        C_2 = tril(C_2)
        if provide_C:
            C = tril(C)

    # get the actual result
    C = syr2k(A, B, C, uplo, trans, alpha, beta)

    # compare the actual result to the expected result and return result of the test
    return allclose(C, C_2, RTOL, ATOL)
Example #22
  def __init__(self, s):
    """Initialize the elastic tensor from a string"""

    if not s:
      raise ValueError("no matrix was provided")

    # Remove pipes and parentheses
    s = s.replace("|", " ").replace("(", " ").replace(")", " ")

    # Remove empty lines
    lines = [line for line in s.split('\n') if line.strip()]
    if len(lines) != 6:
      raise ValueError("should have six rows")

    # Convert to float
    try:
      mat = [map(float, line.split()) for line in lines]
    except ValueError:
      raise ValueError("not all entries are numbers")

    # Make it into a square matrix
    mat = np.array(mat)
    if mat.shape != (6,6):
      # Is it upper triangular?
      if map(len, mat) == [6,5,4,3,2,1]:
        mat = [ [0]*i + mat[i] for i in range(6) ]
        mat = np.array(mat)

      # Is it lower triangular?
      if map(len, mat) == [1,2,3,4,5,6]:
        mat = [ mat[i] + [0]*(5-i) for i in range(6) ]
        mat = np.array(mat)

    if mat.shape != (6,6):
      raise ValueError("should be a square matrix")

    # Check that it is symmetric, or make it symmetric
    if la.norm(np.tril(mat, -1)) == 0:
      mat = mat + np.triu(mat, 1).transpose()
    if la.norm(np.triu(mat, 1)) == 0:
      mat = mat + np.tril(mat, -1).transpose()
    if la.norm(mat - mat.transpose()) > 0:
      raise ValueError("should be symmetric, or triangular")

    # Store it
    self.CVoigt = mat

    # Put it in a more useful representation
    self.SVoigt = la.inv(self.CVoigt)
    VoigtMat = [[0, 5, 4], [5, 1, 3], [4, 3, 2]]
    # Python 2 integer division: p/3 is 0 for normal and 1 for shear indices,
    # giving factors 1, 1/2 and 1/4
    def SVoigtCoeff(p,q): return 1. / ((1+p/3)*(1+q/3))

    self.Smat = [[[[ SVoigtCoeff(VoigtMat[i][j], VoigtMat[k][l]) * self.SVoigt[VoigtMat[i][j]][VoigtMat[k][l]]
                     for i in range(3) ] for j in range(3) ] for k in range(3) ] for l in range(3) ]
    return
Example #23
def nMDS(distmat, Z, weights, Vp):
	dZ = eucD(Z)
	dZ[dZ==0] = 1E-5
	diss_f = distmat.ravel()
	dhat_f = dZ.ravel()
	dhat = isotonic(dhat_f, diss_f)
	dhat = dhat.prediction.reshape(distmat.shape)
	stress = np.sqrt(np.tril((weights*(dZ - dhat)**2)).sum() / np.tril(weights*dZ**2).sum())
	bZ = Bcalc(weights, dhat, dZ)
	Xu = Vp.dot(bZ).dot(Z)
	return stress, Xu
Example #24
def collocate_at_nodes(mesh):

    for d in mesh.dList:
        # Set up collXYZ with an initial value
        coll_store = d.eList[0].vals(d.eList[0].limits[0], d.eList[0].limits[2])

        for e in d.eList:

            xi1 = np.linspace(e.limits[0], e.limits[1], e.m + 1)
            xi2 = np.linspace(e.limits[2], e.limits[3], e.n + 1)
            xi1, xi2 = np.meshgrid(xi1, xi2)
            e.collocationXi = np.vstack([xi1.reshape(-1), xi2.reshape(-1)])

            if e.__class__.__name__ == "SerendipityQuadraticElement":
                e.collocationXi = np.delete(e.collocationXi, 4, axis=1)

            e.collocation_points = e.vals(e.collocationXi[0], e.collocationXi[1])

            # Check for repeated points in new set
            px, py, pz = e.collocation_points
            px = px.reshape(-1, 1)
            py = py.reshape(-1, 1)
            pz = pz.reshape(-1, 1)
            qx, qy, qz = e.collocation_points
            rx = px - qx
            ry = py - qy
            rz = pz - qz
            rx = np.tril(rx, -1)[:, :-1] + np.triu(rx, 1)[:, 1:]
            ry = np.tril(ry, -1)[:, :-1] + np.triu(ry, 1)[:, 1:]
            rz = np.tril(rz, -1)[:, :-1] + np.triu(rz, 1)[:, 1:]
            r = np.sqrt(rx ** 2 + ry ** 2 + rz ** 2)
            delete = np.where(np.any(r < 1e-10, axis=1))[0]
            e.collocation_points = np.delete(e.collocation_points, delete[1:], axis=1)
            e.collocationXi = np.delete(e.collocationXi, delete[1:], axis=1)

            # Check for repeated points in large set
            px, py, pz = e.collocation_points
            px = px.reshape(-1, 1)
            py = py.reshape(-1, 1)
            pz = pz.reshape(-1, 1)

            qx, qy, qz = coll_store

            r = np.sqrt((qx - px) ** 2 + (qy - py) ** 2 + (qz - pz) ** 2)
            delete = np.where(np.any(r < 1e-10, axis=1))[0]

            coll_store = np.hstack([coll_store, np.delete(e.collocation_points, delete, axis=1)])

        d.collocation_points = coll_store

    # Apply to mesh
    mesh.collocation_points = np.hstack([d.collocation_points for d in mesh.dList])
    mesh.numBoundaryCollocation = mesh.collocation_points.shape[1]
Example #25
def passed_test(dtype, as_matrix, x_is_row, y_is_row, provide_A, stride, uplo):
    """
    Run one symmetric rank-2 update test.

    Arguments:
        dtype:        either 'float64' or 'float32', the NumPy dtype to test
        as_matrix:    True to test a NumPy matrix, False to test a NumPy ndarray
        x_is_row:     True to test a row vector as parameter x, False to test a column vector
        y_is_row:     True to test a row vector as parameter y, False to test a column vector
        provide_A:    True if A is to be provided to the BLASpy function, False otherwise
        stride:       stride of x and y to test; if None, a random stride is assigned
        uplo:         BLASpy uplo parameter to test

    Returns:
        True if the expected result is within the margin of error of the actual result,
        False otherwise.
    """

    # generate random sizes for matrix/vector dimensions and vector stride (if necessary)
    n = randint(N_MIN, N_MAX)
    stride = randint(N_MIN, STRIDE_MAX) if stride is None else stride
    n_A = n / stride + (n % stride > 0)  # ceil(n / stride) with Python 2 integer division

    # create random scalars, vectors, and matrices to test
    alpha = uniform(SCAL_MIN, SCAL_MAX)
    x = random_vector(n, x_is_row, dtype, as_matrix)
    y = random_vector(n, y_is_row, dtype, as_matrix)
    A = random_symmetric_matrix(n_A, dtype, as_matrix) if provide_A else None

    # create copies/views of A, x, and y that can be used to calculate the expected result
    x_2 = x.T if x_is_row else x
    y_2 = y.T if y_is_row else y
    A_2 = zeros((n_A, n_A)) if A is None else copy(A)

    # compute the expected result
    if stride == 1:
        A_2 += alpha * dot(x_2, y_2.T)
        A_2 += alpha * dot(y_2, x_2.T)
    else:
        for i in range(0, n_A):
            for j in range(0, n_A):
                A_2[i, j] += alpha * (x_2[i * stride, 0] * y_2[j * stride, 0])
                A_2[i, j] += alpha * (y_2[i * stride, 0] * x_2[j * stride, 0])

    # get the actual result
    A = syr2(x, y, A, uplo, alpha, inc_x=stride, inc_y=stride)

    # make A and A_2 triangular so that they can be compared
    A = triu(A) if uplo == 'u' else tril(A)
    A_2 = triu(A_2) if uplo == 'u' else tril(A_2)

    # compare the actual result to the expected result and return result of the test
    return allclose(A, A_2, RTOL, ATOL)
Example #26
def test_tril():
    for shape in [(10, 5), (5, 10), (10, 10)]:
        for order in ['c', 'f']:
            for inplace in [True, False]:
                ac, ag = gen_gpuarray(shape, 'float32',
                                      order=order, ctx=context)
                result = tril(ag, inplace=inplace)
                assert numpy.all(numpy.tril(ac) == result)
                if inplace:
                    assert numpy.all(numpy.tril(ac) == ag)
                else:
                    assert numpy.all(ac == ag)
Example #27
def test_tril_triu():
    A = np.random.randn(20, 20)
    for chk in [5, 4]:
        dA = da.from_array(A, (chk, chk))

        assert np.allclose(da.triu(dA).compute(), np.triu(A))
        assert np.allclose(da.tril(dA).compute(), np.tril(A))

        for k in [-25, -20, -19, -15, -14, -9, -8, -6, -5, -1,
                  1, 4, 5, 6, 8, 10, 11, 15, 16, 19, 20, 21]:
            assert np.allclose(da.triu(dA, k).compute(), np.triu(A, k))
            assert np.allclose(da.tril(dA, k).compute(), np.tril(A, k))
Example #28
File: rc.py Project: eSMCs/HDPy
 def train(self, sample, trg=None, err=None, d=None, e=None):
     """Train the regression on one or more samples.
     
     ``sample``
         Input samples. Array of size (K, input_dim)
     
     ``trg``
         Sample target. Array of size (K, output_dim)
     
     ``err``
         Sample error terms. Array of size (K, output_dim)
     
     """
     if self._stop_training:
         return
     
     if d is not None:
         warnings.warn("Use of argument 'd' is deprecated. Use 'trg' instead.")
         trg = d
     
     if e is not None:
         warnings.warn("Use of argument 'e' is deprecated. Use 'err' instead.")
         err = e
     
     if self.with_bias:
         sample = self._add_constant(sample)
     
     for i in range(sample.shape[0]):
         # preliminaries
         sample_i = np.atleast_2d(sample[i]).T
         psi_x = self._psi_inv.dot(sample_i)
         gain = 1.0 / (self.lambda_ + sample_i.T.dot(psi_x)) * psi_x
         # error
         if err is None:
             trg_i = np.atleast_2d(trg[i]).T
             pred = self.beta.T.dot(sample_i)
             err_i = trg_i - pred
         else:
             err_i = np.atleast_2d(err[i]).T
         # update
         self.beta += gain.dot(err_i.T)
         tri = np.tril(self._psi_inv)
         tri -= np.tril(gain*psi_x.T)
         tri /= self.lambda_
         #self._psi_inv = tri + tri.T - np.diag(tri.diagonal())
         # FIXME: (numpy bug) tri.diagonal() introduces a memory leak
         self._psi_inv = np.tril(tri, -1).T + tri
         
         # Diagonal stabilization
         self.cnt_train += 1
         if self.cnt_train % self.tau == 0:
             np.fill_diagonal(self._psi_inv, self.diag_default)
Example #29
    def _process_data(self, data):
        """data array into usable data structures
        
        Create empty spin array
        Create structures for couplings between spins
        Create adjacency list for spins and neighbours
        """
        # map spins to contiguous positive integer values
        max_spin = np.amax(data[:,:-1]).astype(int)
        unique_spins = np.unique(data[:,:-1]).astype(int)
        spin_map = np.zeros((max_spin + 1), dtype=int)
        for i, spin in enumerate(unique_spins):
            spin_map[spin] = i

        # build couplings matrix, spin array and adjacency list
        self.size = unique_spins.size
        self.J = np.zeros((self.size, self.size), dtype=float)
        # this is lower triangular with self-couplings on the diagonal
        for i, j, J in data[data[:,0].argsort()]:
            i = spin_map[int(i)]
            j = spin_map[int(j)]
            # print "i: {}, j: {}, J: {}".format(i, j, J)
            if i >= j:
                self.J[i,j] = J
            else:
                self.J[j,i] = J
        
        if np.max(np.absolute(self.J)) > 1:
            self.scaling_factor = 1e5
        self.J /= self.scaling_factor
        self.h = np.diag(self.J).copy()
        np.fill_diagonal(self.J, 0)
        # ensure J is formed properly: non-empty and strictly lower triangular
        assert self.J.any()
        assert not np.triu(self.J).any()
        assert np.tril(self.J).any()
        
        self.J = np.tril(self.J)
        assert not np.triu(self.J).any()
        self.J += np.tril(self.J).T
        
        # about the adjacency list:
        # it is possible to just use h and J with the spins array
        # in order to do all the calculations using linear algebra,
        # but when these problems are sparse, an adjacency list works
        # much faster -- and in a spinglass, spins often have only two or
        # three neighbours
        self.adjacency = [[j for j, x in enumerate(self.J[i]) if x != 0] for i in xrange(self.size)]
        
        # lock arrays to prevent accidental mutations
        self.J.flags.writeable = False
        self.h.flags.writeable = False
Example #30
    def test_syr2k(self):
        for f in _get_func('syr2k'):
            c = f(a=self.a, b=self.b, alpha=1.)
            assert_array_almost_equal(np.triu(c), np.triu(self.t))

            c = f(a=self.a, b=self.b, alpha=1., lower=1)
            assert_array_almost_equal(np.tril(c), np.tril(self.t))

            c0 = np.ones(self.t.shape)
            c = f(a=self.a, b=self.b, alpha=1., beta=1., c=c0)
            assert_array_almost_equal(np.triu(c), np.triu(self.t+c0))

            c = f(a=self.a, b=self.b, alpha=1., trans=1)
            assert_array_almost_equal(np.triu(c), np.triu(self.tt))
Example #31
 def args_maker():
     a = rng(lhs_shape, dtype)
     if sym_pos:
         a = onp.matmul(a, onp.conj(T(a)))
         a = onp.tril(a) if lower else onp.triu(a)
     return [a, rng(rhs_shape, dtype)]
Example #32
def compute_similarity(F, inds_to_compare):  # inds_to_compare: feature indices
    features_to_compare = F[inds_to_compare, :]
    CORRMAT = np.corrcoef(features_to_compare)
    similarity = np.mean(np.ma.masked_equal(np.tril(CORRMAT, -1), 0))
    return similarity
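A toy check of the masking convention, which also shows its caveat: masked_equal(..., 0) is meant to drop the zeroed upper triangle, but it would equally drop a genuine zero correlation below the diagonal.

import numpy as np

F = np.vstack([np.arange(5.), 2 * np.arange(5.), -np.arange(5.)])
C = np.corrcoef(F)   # off-diagonal correlations: +1, -1, -1
print(np.mean(np.ma.masked_equal(np.tril(C, -1), 0)))   # (1 - 1 - 1) / 3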
Example #33
def klip_math(sci,
              ref_psfs,
              numbasis,
              covar_psfs=None,
              return_basis=False,
              return_basis_and_eig=False):
    """
    Helper function for KLIP that does the linear algebra
    
    Args:
        sci: array of length p containing the science data
        ref_psfs: N x p array of the N reference PSFs that 
                  characterizes the PSF of the p pixels
        numbasis: number of KLIP basis vectors to use (can be an int or an array of ints of length b)
        covar_psfs: covariance matrix of reference psfs passed in so you don't have to calculate it here
        return_basis: If true, return KL basis vectors (used when onesegment==True)
        return_basis_and_eig: If true, return KL basis vectors as well as the eigenvalues and eigenvectors of the
                                covariance matrix. Used for KLIP Forward Modelling of Laurent Pueyo.

    Returns:
        sub_img_rows_selected: array of shape (p,b) that is the PSF subtracted data for each of the b KLIP basis
                               cutoffs. If numbasis was an int, then sub_img_row_selected is just an array of length p
        KL_basis: array of shape (max(numbasis),p). Only if return_basis or return_basis_and_eig is True.
        evals: Eigenvalues of the covariance matrix. The covariance matrix is assumed NOT to be normalized by (p-1).
                Only if return_basis_and_eig is True.
        evecs: Eigenvectors of the covariance matrix. The covariance matrix is assumed NOT to be normalized by (p-1).
                Only if return_basis_and_eig is True.
    """
    # for the science image, subtract the mean and mask bad pixels
    sci_mean_sub = sci - np.nanmean(sci)
    # sci_nanpix = np.where(np.isnan(sci_mean_sub))
    # sci_mean_sub[sci_nanpix] = 0

    # do the same for the reference PSFs
    # playing some tricks to vectorize the subtraction
    ref_psfs_mean_sub = ref_psfs - np.nanmean(ref_psfs, axis=1)[:, None]
    ref_psfs_mean_sub[np.where(np.isnan(ref_psfs_mean_sub))] = 0

    # calculate the covariance matrix for the reference PSFs
    # note that numpy.cov normalizes by p-1 to get the NxN covariance matrix
    # we have to correct for that a few lines down when constructing the KL
    # vectors since that's not part of the equation in the KLIP paper
    if covar_psfs is None:
        covar_psfs = np.cov(ref_psfs_mean_sub)

    # maximum number of KL modes
    tot_basis = covar_psfs.shape[0]

    # only pick numbasis requested that are valid. We can't compute more KL basis than there are reference PSFs
    # do numbasis - 1 for ease of indexing since index 0 is using 1 KL basis vector
    numbasis = np.clip(
        numbasis - 1, 0, tot_basis -
        1)  # clip values, for output consistency we'll keep duplicates
    max_basis = np.max(
        numbasis
    ) + 1  # maximum number of eigenvectors/KL basis we actually need to use/calculate

    # calculate eigenvalues and eigenvectors of covariance matrix, but only the ones we need (up to max basis)
    evals, evecs = la.eigh(covar_psfs,
                           eigvals=(tot_basis - max_basis, tot_basis - 1))

    # check if there are negative eigenvalues as they will cause NaNs later that we have to remove
    # the eigenvalues are ordered smallest to largest
    #check_nans = evals[-1] < 0 # currently this checks that *all* the evals are neg, but we want just one.
    # also, include 0 because that is a bad value too
    check_nans = np.any(
        evals <= 0)  # alternatively, check_nans = evals[0] <= 0

    # scipy.linalg.eigh spits out the eigenvalues/vectors smallest first so we need to reverse
    # we're going to recopy them to hopefully improve caching when doing matrix multiplication
    evals = np.copy(evals[::-1])
    evecs = np.copy(
        evecs[:, ::-1], order='F'
    )  #fortran order to improve memory caching in matrix multiplication

    # keep an index of the negative eigenvalues for future reference if there are any
    if check_nans:
        neg_evals = (np.where(evals <= 0))[0]

    # calculate the KL basis vectors
    kl_basis = np.dot(ref_psfs_mean_sub.T, evecs)
    # JB question: Why is there this [None, :]? (It adds an empty first dimension)
    kl_basis = kl_basis * (1. / np.sqrt(
        evals * (np.size(sci) - 1)))[None, :]  #multiply a value for each row

    # sort to KL basis in descending order (largest first)
    # kl_basis = kl_basis[:,eig_args_all]

    # duplicate science image by the max_basis to do simultaneous calculation for different k_KLIP
    sci_mean_sub_rows = np.tile(sci_mean_sub, (max_basis, 1))
    sci_rows_selected = np.tile(
        sci_mean_sub,
        (np.size(numbasis), 1))  # this is the output image which has less rows

    # bad pixel mask
    # do it first for the image we're just doing computations on but don't care about the output
    sci_nanpix = np.where(np.isnan(sci_mean_sub_rows))
    sci_mean_sub_rows[sci_nanpix] = 0
    # now do it for the output image
    sci_nanpix = np.where(np.isnan(sci_rows_selected))
    sci_rows_selected[sci_nanpix] = 0

    # do the KLIP equation, but now all the different k_KLIP simultaneously
    # calculate the inner product of science image with each of the different kl_basis vectors
    # TODO: can we optimize this so it doesn't have to multiply all the rows because in the next lines we only select some of them
    inner_products = np.dot(sci_mean_sub_rows,
                            np.require(kl_basis, requirements=['F']))
    # select the KLIP modes we want for each level of KLIP by multiplying by lower diagonal matrix
    lower_tri = np.tril(np.ones([max_basis, max_basis]))
    inner_products = inner_products * lower_tri
    # if there are NaNs due to negative eigenvalues, make sure they don't mess up the matrix multiplication
    # by setting the appropriate values to zero
    if check_nans:
        needs_to_be_zeroed = np.where(lower_tri == 0)
        inner_products[needs_to_be_zeroed] = 0
        # make a KLIP PSF for each amount of klip basis, but only for the amounts of klip basis we actually output
        kl_basis[:, neg_evals] = 0
        klip_psf = np.dot(inner_products[numbasis, :], kl_basis.T)
        # for KLIP PSFs that use so many KL modes that they become nans, we have to put nan's back in those
        badbasis = np.where(
            numbasis >=
            np.min(neg_evals))  # use basis with negative eigenvalues
        klip_psf[badbasis[0], :] = np.nan
    else:
        # make a KLIP PSF for each amount of klip basis, but only for the amounts of klip basis we actually output
        klip_psf = np.dot(inner_products[numbasis, :], kl_basis.T)

    # make subtracted image for each number of klip basis
    sub_img_rows_selected = sci_rows_selected - klip_psf

    # restore NaNs
    sub_img_rows_selected[sci_nanpix] = np.nan

    if return_basis is True:
        return sub_img_rows_selected.transpose(), kl_basis.transpose()
    elif return_basis_and_eig is True:
        return sub_img_rows_selected.transpose(), kl_basis.transpose(
        ), evals * (np.size(sci) - 1), evecs
    else:
        return sub_img_rows_selected.transpose()
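The np.tril(np.ones(...)) step above implements the simultaneous-truncation trick: after tiling the science image, every row of inner_products holds the projections onto all KL modes, and multiplying by a lower-triangular matrix of ones zeroes each row's coefficients past its cutoff, so row k reconstructs a PSF from exactly k+1 modes in a single matrix product. In miniature:

import numpy as np

max_basis = 4
coeffs = np.arange(1., 5.)              # projections onto 4 KL modes
rows = np.tile(coeffs, (max_basis, 1))
print(rows * np.tril(np.ones([max_basis, max_basis])))
# row 0 -> [1 0 0 0], row 1 -> [1 2 0 0], row 2 -> [1 2 3 0], row 3 -> [1 2 3 4]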
Example #34
def create_synth_data(data_type='bool', noise_factor=0):
    n_data = N_DATA
    n_dim = N_DIM

    S = np.zeros((n_dim, n_data, n_data))  # Similarity tensor
    cluster_members = [50, 100, 200]

    # Construct labels
    y = []
    for i, c in enumerate(cluster_members):
        y.extend(np.ones(c) * i)

    intra_c_prob = np.array([[0.8, 0.1, 0.7], [0.05, 0.4, 0.06],
                             [0.6, 0.8, 0.67], [0.6, 0.4, 0.1]])
    # noise_d = np.array([0.25,0.15,0.45,0.22]) + noise_factor
    noise_d = np.array([0, 0, 0, 0]) + noise_factor
    intra_cluster_proba = 0.1

    for d in range(n_dim):
        i = 0
        for c in range(len(cluster_members)):
            if data_type == "bool":
                S[d][i:i + cluster_members[c],
                     i:i + cluster_members[c]] = np.random.binomial(
                         1, intra_c_prob[d][c],
                         size=cluster_members[c]**2).reshape(
                             (cluster_members[c], cluster_members[c]))
            elif data_type == 'linear':
                S[d][i:i + cluster_members[c],
                     i:i + cluster_members[c]] = np.random.normal(
                         loc=intra_c_prob[d][c],
                         scale=0.2,
                         size=cluster_members[c]**2).reshape(
                             (cluster_members[c], cluster_members[c]))
            else:
                raise ValueError

            i = i + cluster_members[c]

        # Add noise
        if data_type == "bool":
            S[d] = S[d] + np.random.binomial(
                1, noise_d[d], size=S[d].size
            ).reshape(
                S[d].shape
            )  #np.random.normal(loc=noise_d[d],scale=0.9,size=S[d].size).reshape(S[d].shape)
        elif data_type == 'linear':
            S[d] = S[d] + np.random.normal(
                loc=0, scale=noise_d[d], size=S[d].size).reshape(S[d].shape)
        else:
            raise ValueError

        # Make symmetric
        S[d] = (np.tril(S[d]) + np.triu(S[d].T, 1))

        S[d] = np.where(S[d] < 0, 0, S[d])  # remove negative

        if data_type == "bool":
            S[d] = np.where(S[d] > 1, 1, S[d])  # Remove val > 1
        elif data_type == "linear":
            S[d] = np.where(S[d] > 1, 0, S[d])  # Remove val > 1

        # Set diag to single value
        di, dj = np.diag_indices_from(S[d])
        S[d][di, dj] = 1

    return S, y
Example #35
def norm_distance(distance):
    L = np.tril(distance, -1)
    # note: MinMaxScaler rescales each column of L independently
    min_max_scaler = preprocessing.MinMaxScaler()
    distance_tran = min_max_scaler.fit_transform(L)
    distance_tran = distance_tran + np.transpose(distance_tran)
    return distance_tran
Example #36
inputs = np.concatenate((full_X_jets, full_X_mu, full_X_el), axis=1)
weights = np.concatenate((full_jet_w, full_mu_w, full_el_w), axis=1)
labels = np.concatenate((full_jet_labels, full_mu_labels, full_el_labels))

koop = np.linspace(0, 69, 70)

correlation = np.abs(np.corrcoef(weights.transpose()))
correlation_in = np.abs(np.corrcoef(inputs.transpose()))
rms_val = rms(weights)
select_rms = rms_val < 0.008
matrix_rms = rms_val * np.transpose(rms_val)

selection = (correlation < 0.1) | (correlation_in > 0.6)

correlation_in = np.abs(np.tril(correlation_in, -1))
correlation = np.abs(np.tril(correlation, -1))
correlation[selection] = 0
correlation[select_rms, :] = 0
correlation[:, select_rms] = 0
rms_corr = np.multiply(matrix_rms, correlation)
idx = largest_indices(rms_corr, 10)
print labels[idx[0]]
print labels[idx[1]]

plt.figure(figsize=(14, 14))
plt.imshow(rms_corr, cmap='jet', interpolation='nearest')
plt.xticks(koop, labels, rotation=90)
plt.yticks(koop, labels)
Example #37
    if len(B.shape) != 1:
        raise ValueError('B is not a vector')
    if fA != B.shape[0]:
        raise ValueError('Matrix A and vector B dimensions do not match')
    if not diago.all():
        raise ValueError('A is a singular matrix')

    x = np.zeros_like(B, dtype=float)
    for k in np.arange(fA):
        suma = np.dot(A[k, :k], x[:k])
        x[k] = (B[k] - suma) / A[k, k]
    return A, x


print('Lower triangular matrix:')
E = np.tril(A)
print(sltrinf(E, B))
soltrinf = la.solve(E, B)
print(soltrinf)

# GAUSS METHOD


def slgauss(A, b):
    A = A.astype(float)
    b = b.astype(float)
    x = np.zeros_like(b, dtype=float)
    fA, cA = A.shape
    for i in range(fA):
        if A[i, i] == 0:
            raise ValueError('pivot becomes null')
Example #38
    def sample_trajectory(self, T=10000, sample_freq=10):
        n = self.n_balls
        assert (T % sample_freq == 0)
        T_save = int(T / sample_freq - 1)
        diag_mask = np.ones((n, n), dtype=bool)
        np.fill_diagonal(diag_mask, 0)
        counter = 0

        if self.uniform_draw:

            total_num_edges = int(0.5 * self.n_balls * (self.n_balls - 1))

            num_edges = random.randint(0, total_num_edges)
            edges = [0 for i in range(total_num_edges)]
            for i in range(num_edges):
                edges[i] = 1
            random.shuffle(edges)
            spring_edges = np.zeros((self.n_balls, self.n_balls))
            spring_edges[np.triu_indices(self.n_balls, 1)] = np.array(edges)
            spring_edges.T[np.triu_indices(self.n_balls, 1)] = np.array(edges)

            charges = [0 for i in range(self.n_balls)]
            n_c = random.randint(
                1, self.n_balls)  # choose a random number of charges, 1 to 5
            for i in range(n_c):
                charges[i] = 1
            random.shuffle(charges)
            charges = np.expand_dims(np.array(charges), -1)
            charge_edges = charges.dot(charges.transpose()).astype('float')

        else:
            spring_edges = np.random.choice(
                self.
                spring_types,  # self.spring_types is an array of relative spring strengths eg. [0., 0.5, 1.]
                size=(self.n_balls, self.n_balls),
                p=self.spring_prob)  # prob. of each spring type
            spring_edges = np.tril(spring_edges) + np.tril(
                spring_edges, -1).T  # this makes the edges matrix symmetric
            np.fill_diagonal(spring_edges, 0)  # remove self loops

            # Sample charge edges
            charges = np.random.choice(self.charge_types,
                                       size=(self.n_balls, 1),
                                       p=self.charge_prob)
            charge_edges = charges.dot(charges.transpose())
            #np.fill_diagonal(charge_edges, 0)                    # remove self loops

        # Initialize location and velocity
        loc = np.zeros((T_save, 2, n))
        vel = np.zeros((T_save, 2, n))
        loc_next = np.random.randn(
            2, n) * self.loc_std  # randn samples from a unit normal dist.
        vel_next = np.random.randn(2, n)
        v_norm = np.sqrt((vel_next**2).sum(axis=0)).reshape(1, -1)
        vel_next = vel_next * self.vel_norm / v_norm
        loc[0, :, :], vel[0, :, :] = self._clamp(loc_next, vel_next)

        # disables division by zero warning, since I fix it with fill_diagonal
        with np.errstate(divide='ignore', invalid='ignore'):

            spring_forces_size = -self.spring_interaction_strength * spring_edges
            #np.fill_diagonal(spring_forces_size, 0)  # self forces are zero (fixes division by zero)

            l2_dist_power3 = np.power(
                self._l2(loc_next.transpose(), loc_next.transpose()), 3. / 2.)
            # size of forces up to a 1/|r| factor
            # since I later multiply by an unnormalized r vector
            charge_forces_size = -self.charge_interaction_strength * charge_edges / l2_dist_power3
            np.fill_diagonal(
                charge_forces_size,
                0)  # self forces are zero (fixes division by zero)
            #assert (np.abs(charge_forces_size[diag_mask]).min() > 1e-10)

            F_s = (spring_forces_size.reshape(1, n, n) * np.concatenate(
                (np.subtract.outer(loc_next[0, :], loc_next[0, :]).reshape(
                    1, n, n), np.subtract.outer(loc_next[1, :],
                                                loc_next[1, :]).reshape(
                                                    1, n, n)))).sum(axis=-1)
            #assert (np.abs(charge_forces_size[diag_mask]).min() > 1e-10)
            F_c = (charge_forces_size.reshape(1, n, n) * np.concatenate(
                (np.subtract.outer(loc_next[0, :], loc_next[0, :]).reshape(
                    1, n, n), np.subtract.outer(loc_next[1, :],
                                                loc_next[1, :]).reshape(
                                                    1, n, n)))).sum(axis=-1)
            F = F_s + F_c

            F[F > self._max_F] = self._max_F
            F[F < -self._max_F] = -self._max_F

            vel_next += self._delta_T * F

            for i in range(1, T):
                loc_next += self._delta_T * vel_next
                loc_next, vel_next = self._clamp(loc_next, vel_next)

                if i % sample_freq == 0:
                    loc[counter, :, :], vel[counter, :, :] = loc_next, vel_next
                    counter += 1

                l2_dist_power3 = np.power(
                    self._l2(loc_next.transpose(), loc_next.transpose()),
                    3. / 2.)
                # size of forces up to a 1/|r| factor
                # since I later multiply by an unnormalized r vector
                charge_forces_size = self.charge_interaction_strength * charge_edges / l2_dist_power3
                np.fill_diagonal(
                    charge_forces_size,
                    0)  # self forces are zero (fixes division by zero)

                F_s = (spring_forces_size.reshape(1, n, n) * np.concatenate(
                    (np.subtract.outer(loc_next[0, :], loc_next[0, :]).reshape(
                        1, n, n),
                     np.subtract.outer(loc_next[1, :], loc_next[1, :]).reshape(
                         1, n, n)))).sum(axis=-1)
                #assert (np.abs(charge_forces_size[diag_mask]).min() > 1e-10)
                F_c = (charge_forces_size.reshape(1, n, n) * np.concatenate(
                    (np.subtract.outer(loc_next[0, :], loc_next[0, :]).reshape(
                        1, n, n),
                     np.subtract.outer(loc_next[1, :], loc_next[1, :]).reshape(
                         1, n, n)))).sum(axis=-1)
                F = F_s + F_c

                F[F > self._max_F] = self._max_F
                F[F < -self._max_F] = -self._max_F
                vel_next += self._delta_T * F

            # Add noise to observations
            loc += np.random.randn(T_save, 2, self.n_balls) * self.noise_var
            vel += np.random.randn(T_save, 2, self.n_balls) * self.noise_var
            np.fill_diagonal(charge_edges, 0)
            edges = np.concatenate((np.expand_dims(
                spring_edges, 0), np.expand_dims(charge_edges, 0)),
                                   axis=0)

            return loc, vel, edges
Example #39
def compute_and_plot_specrad():
    """
    Compute and plot spectral radius of smoother iteration matrix for a whole range of eigenvalues
    Returns:

    """
    # setup_list = [('LU', 'to0'), ('LU', 'toinf'), ('IE', 'to0'), ('IE', 'toinf')]
    # setup_list = [('LU', 'to0'), ('LU', 'toinf')]
    # setup_list = [('IE', 'to0'), ('IE', 'toinf')]
    # setup_list = [('LU', 'toinf'), ('IE', 'toinf')]
    setup_list = [('IE', 'full'), ('LU', 'full')]
    setup_list = [('EX', 'to0'), ('PIC', 'to0')]

    # set up plotting parameters
    params = {'legend.fontsize': 20,
              'figure.figsize': (12, 8),
              'axes.labelsize': 20,
              'axes.titlesize': 20,
              'xtick.labelsize': 16,
              'ytick.labelsize': 16,
              'lines.linewidth': 3
              }
    plt.rcParams.update(params)

    Nnodes = 5
    Nsteps = 1

    coll = CollGaussRadau_Right(Nnodes, 0, 1)
    Qmat = coll.Qmat[1:, 1:]

    Nmat = np.zeros((Nnodes, Nnodes))
    Nmat[:, -1] = 1

    Emat = np.zeros((Nsteps, Nsteps))
    np.fill_diagonal(Emat[1:, :], 1)

    for qd_type, conv_type in setup_list:

        if qd_type == 'LU':

            QT = coll.Qmat[1:, 1:].T
            [_, _, U] = LA.lu(QT, overwrite_a=True)
            QDmat = U.T

        elif qd_type == 'IE':

            QI = np.zeros(np.shape(coll.Qmat))
            for m in range(coll.num_nodes + 1):
                QI[m, 1:m + 1] = coll.delta_m[0:m]
            QDmat = QI[1:, 1:]

        elif qd_type == 'EE':

            QE = np.zeros(np.shape(coll.Qmat))
            for m in range(coll.num_nodes + 1):
                QE[m, 0:m] = coll.delta_m[0:m]
            QDmat = QE[1:, 1:]

        elif qd_type == 'PIC':

            QDmat = np.zeros(np.shape(coll.Qmat[1:, 1:]))

        elif qd_type == 'EX':

            QT = coll.Qmat[1:, 1:].T
            [_, _, U] = LA.lu(QT, overwrite_a=True)
            QDmat = np.tril(U.T, k=-1)
            print(QDmat)

        else:
            raise NotImplementedError('qd_type %s is not implemented' % qd_type)

        # lim_specrad = max(abs(np.linalg.eigvals(np.eye(Nnodes) - np.linalg.inv(QDmat).dot(Qmat))))
        # print('qd_type: %s -- lim_specrad: %6.4e -- conv_type: %s' % (qd_type, lim_specrad, conv_type))

        if conv_type == 'to0':

            ilim_left = -4
            ilim_right = 2
            rlim_left = 2
            rlim_right = -4

        elif conv_type == 'toinf':

            ilim_left = 0
            ilim_right = 11
            rlim_left = 6
            rlim_right = 0

        elif conv_type == 'full':

            ilim_left = -10
            ilim_right = 11
            rlim_left = 10
            rlim_right = -11

        else:
            raise NotImplementedError('conv_type %s is not implemented' % conv_type)

        ilam_list = 1j * np.logspace(ilim_left, ilim_right, 201)
        rlam_list = -1 * np.logspace(rlim_left, rlim_right, 201)

        assert (rlim_right - rlim_left + 1) % 5 == 0
        assert (ilim_right - ilim_left - 1) % 5 == 0
        assert (len(rlam_list) - 1) % 5 == 0
        assert (len(ilam_list) - 1) % 5 == 0

        Prho = np.zeros((len(rlam_list), len(ilam_list)))

        for idr, rlam in enumerate(rlam_list):
            for idi, ilam in enumerate(ilam_list):
                dxlam = rlam + ilam

                mat = np.linalg.inv(np.eye(Nnodes * Nsteps) - dxlam * np.kron(np.eye(Nsteps), QDmat)).dot(
                    dxlam * np.kron(np.eye(Nsteps), (Qmat - QDmat)) + np.kron(Emat, Nmat))
                mat = np.linalg.matrix_power(mat, Nnodes)

                Prho[idr, idi] = max(abs(np.linalg.eigvals(mat)))

        print(np.amax(Prho))

        fig, ax = plt.subplots(figsize=(15, 10))

        ax.set_xticks([i + 0.5 for i in range(0, len(rlam_list), int(len(rlam_list) / 5))])
        ax.set_xticklabels([r'-$10^{%d}$' % i for i in range(rlim_left, rlim_right,
                                                             int((rlim_right - rlim_left + 1) / 5))])
        ax.set_yticks([i + 0.5 for i in range(0, len(ilam_list), int(len(ilam_list) / 5))])
        ax.set_yticklabels([r'$10^{%d}i$' % i for i in range(ilim_left, ilim_right,
                                                             int((ilim_right - ilim_left - 1) / 5))])

        cmap = plt.get_cmap('Reds')
        pcol = plt.pcolor(Prho.T, cmap=cmap, norm=LogNorm(vmin=1E-09, vmax=1E-00))

        plt.colorbar(pcol)

        plt.xlabel(r'$Re(\Delta t\lambda)$')
        plt.ylabel(r'$Im(\Delta t\lambda)$')

        fname = 'data/heatmap_smoother_' + conv_type + '_Nsteps' + str(Nsteps) + '_M' + \
                str(Nnodes) + '_' + qd_type + '.png'
        plt.savefig(fname, rasterized=True, transparent=True, bbox_inches='tight')
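For a single step (Nsteps == 1 makes Emat vanish), the quantity plotted above reduces to the spectral radius of the preconditioned SDC iteration matrix. A minimal sketch of that reduction, assuming Qmat and QDmat as built above:

import numpy as np

def smoother_specrad(dxlam, Qmat, QDmat):
    # iteration matrix of one preconditioned sweep:
    # (I - dxlam*QD)^(-1) * dxlam*(Q - QD); the np.kron/Emat coupling
    # in the script is zero when Nsteps == 1
    M = Qmat.shape[0]
    it_mat = np.linalg.solve(np.eye(M) - dxlam * QDmat,
                             dxlam * (Qmat - QDmat))
    # the script raises mat to the power Nnodes before taking eigenvalues,
    # which only raises the spectral radius to that power
    return max(abs(np.linalg.eigvals(it_mat)))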
Example No. 40
0
 def __init__(self, xTr, yTr, xTe=None, yTe=None, **kwargs):
     self.xTr = xTr
     self.yTr = yTr
     self.xTe = xTe
     self.yTe = yTe
     self.num_inducing_points = min(xTr.shape[0],
                                    kwargs.get('num_inducing_points', 10))
     self.quad = GaussHermiteQuadrature(kwargs.get('quad_deg', 30))
     self.kernel_type = kwargs.get('kernel_type', 'rbf')
     self.kernel_args = kwargs.get('kernel_args', {'gamma': None})
     self.learning_rate = kwargs.get('learning_rate', 0.01)
     self.r0 = self.learning_rate
     self.alpha = kwargs.get('alpha', 0.2)
     self.verbose = kwargs.get('verbose', 0)
     self.max_iter = kwargs.get('max_iter', 10000)
     self.tolerance = kwargs.get('tolerance', 1.0)
     if xTr is None or yTr is None:
         raise ValueError('training data must not be None')
     else:
         M = self.num_inducing_points
         self.labels = np.unique(yTr)
         self.num_classes = len(self.labels)
         C = self.num_classes
         self.labels_dist = []
         for i in range(C):
             indices, = np.where(self.yTr == self.labels[i])
             self.labels_dist.append(indices)
         self.inducing_points = xTr[self.sample_x(M)]
         if self.verbose > 0:
             print('computing kernel matrices...')
         self.Kmm = self.kernel(self.inducing_points, self.inducing_points)
         self.Kmm_inv = cho_inverse(self.Kmm)
         self.Knn = self.kernel(xTr, xTr)
         self.Knm = self.kernel(xTr, self.inducing_points)
         self.A = self.Knm.dot(self.Kmm_inv)
         self.mask = np.tril(np.ones((M, M))).ravel()
         if self.verbose > 0:
             print('finished.')
         self.parameters = np.zeros(M * C + M * M * C)
         self.parameters_best = None
         for j in range(C):
             self.parameters[M * j:M * (j + 1)] = np.ones(M)
             self.parameters[M * C + M * M * j:M * C + M * M *
                             (j + 1)] = np.eye(M).ravel()
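As a sketch of how the flat parameter vector above can be read back, assuming the layout created in __init__ (M mean entries per class, then a raveled M x M factor per class, with the tril mask selecting the lower triangle); unpack_parameters is a hypothetical helper, not part of the class:

import numpy as np

def unpack_parameters(parameters, M, C, mask):
    # hypothetical helper: recover per-class means and lower-triangular
    # factors from the flat vector laid out in __init__
    means, factors = [], []
    for j in range(C):
        means.append(parameters[M * j:M * (j + 1)])
        flat = parameters[M * C + M * M * j:M * C + M * M * (j + 1)]
        factors.append((flat * mask).reshape(M, M))  # keep lower triangle only
    return means, factors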
Example No. 41
0
def test_triul(shape, k):
    s = sparse.random(shape, density=0.5)
    x = s.todense()

    assert_eq(np.triu(x, k), sparse.triu(s, k))
    assert_eq(np.tril(x, k), sparse.tril(s, k))
Example No. 42
0
fname = sys.argv[1] # tsv filename: gen,clusterid,cost,genestart,chromid
dirpng = "Plots/"
label = fname.split('/')[-1].split('-')[0]
dirpng = dirpng + label
Path(dirpng).mkdir(parents=True, exist_ok=True)  # mkdir returns None, so there is nothing to keep
logprint("Plot dir: %s" % dirpng)
logprint("Label: %s" % label)

# ALL CHROMOSOME WORK
print("Getting correlation")
df = pd.read_csv(fname, sep = "\t")
cor =  df.iloc[:,1:df.shape[1]-2].T.corr()

print("Ploting")
plt.figure(figsize=(10,10),dpi=300)
masking = np.tril(cor)
sns.heatmap(cor ,xticklabels=False, yticklabels=False, 
	mask=masking, vmin=-1., vmax=1., square=True,
	# cbar_kws={"shrink": 0.75}, cmap=RdBu_11.mpl_colormap)
	cbar_kws={"shrink": 0.75}, cmap=plt.get_cmap('seismic'))
plt.title('Pearson correlation ' + label)
plt.ylabel('Gene start position')
plt.xlabel('Gene start position')
plt.tight_layout()
pngname = dirpng + '/' + label + '-allchroms.png'
plt.savefig(pngname)
plt.clf()
plt.close() # to clean memory
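A common variant of the masking above passes seaborn an explicit boolean mask for the redundant half; a sketch, assuming cor, sns and plt from above (here the strict upper triangle is hidden instead):

mask = np.triu(np.ones_like(cor, dtype=bool), k=1)
sns.heatmap(cor, mask=mask, vmin=-1., vmax=1., square=True,
            xticklabels=False, yticklabels=False,
            cbar_kws={"shrink": 0.75}, cmap=plt.get_cmap('seismic'))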


# BY CHROMOSOME WORK
Example No. 43
0
def simulate_match(foot_model, homeTeam, awayTeam, max_goals=10):
    home_goals_avg = foot_model.predict(
        pd.DataFrame(data={
            'team': homeTeam,
            'opponent': awayTeam,
            'home': 1
        },
                     index=[1])).values[0]
    away_goals_avg = foot_model.predict(
        pd.DataFrame(data={
            'team': awayTeam,
            'opponent': homeTeam,
            'home': 0
        },
                     index=[1])).values[0]
    team_pred = [[poisson.pmf(i, team_avg) for i in range(0, max_goals + 1)]
                 for team_avg in [home_goals_avg, away_goals_avg]]
    return (np.outer(np.array(team_pred[0]), np.array(team_pred[1])))


prob = simulate_match(poisson_model, hteam, ateam, max_goals=10)
win = np.sum(np.tril(prob, -1))
draw = np.sum(np.diag(prob))
loss = np.sum(np.triu(prob, 1))
print("home team win percentage")
print(win * 100)
print("Away team win percentage")
print(loss * 100)
print("Draw percentage")
print(draw * 100)
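The split above works because prob[i, j] is the probability that the home team scores i and the away team scores j, so entries below the diagonal (i > j) are home wins. A quick sanity check, assuming prob from simulate_match:

total = np.sum(np.tril(prob, -1)) + np.sum(np.diag(prob)) + np.sum(np.triu(prob, 1))
assert np.isclose(total, prob.sum())  # the three regions partition the score grid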
Example No. 44
0
def _get_symmat(size):
    np.random.seed(1)
    A = np.random.randint(1, 21, (size, size))
    lA = np.tril(A)
    return lA.dot(lA.T)
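Because _get_symmat builds its output as lA.dot(lA.T) with a strictly positive diagonal, np.linalg.cholesky recovers lA exactly; a quick check, assuming the function above:

import numpy as np

lA = np.tril(np.random.RandomState(1).randint(1, 21, (6, 6)))  # same draw as inside _get_symmat(6)
assert np.allclose(np.linalg.cholesky(_get_symmat(6)), lA)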
Example No. 45
0
def simulate_dags(d, s0, K, rho, graph_type):
    """Simulate random DAG with some expected number of edges.

    Args:
        d (int): num of nodes
        s0 (int): expected num of edges
        K (int): number of groups
        rho (float): in [0, 1); controls how much the DAGs vary across groups
        graph_type (str): ER, SF

    Returns:
        B (np.ndarray): [K, d, d] binary adj matrix of DAG
    """
    def _random_permutation(M):
        # np.random.permutation permutes first axis only
        P = np.random.permutation(np.eye(M.shape[0]))
        return P.T @ M @ P

    def _random_acyclic_orientation(B_und):
        return np.tril(_random_permutation(B_und), k=-1)

    def _graph_to_adjmat(G):
        return np.array(G.get_adjacency().data)

    if graph_type == 'ER':
        # Erdos-Renyi
        G_und = ig.Graph.Erdos_Renyi(n=d, m=s0)
        B_und = _graph_to_adjmat(G_und)
        B = _random_acyclic_orientation(B_und)
    elif graph_type == 'SF':
        # Scale-free, Barabasi-Albert
        G = ig.Graph.Barabasi(n=d, m=int(round(s0 / d)), directed=True)
        B = _graph_to_adjmat(G)
    elif graph_type == 'BP':
        # Bipartite, Sec 4.1 of (Gu, Fu, Zhou, 2018)
        top = int(0.2 * d)
        G = ig.Graph.Random_Bipartite(top,
                                      d - top,
                                      m=s0,
                                      directed=True,
                                      neimode=ig.OUT)
        B = _graph_to_adjmat(G)
    else:
        raise ValueError('unknown graph type')

    # add group variations
    B_list = []
    Btemp = np.triu(-np.ones([d, d]), k=0)
    Btemp = Btemp + B
    avai_index = np.where(Btemp == 0)
    E = np.shape(np.where(Btemp == 1))[1]
    L = np.shape(avai_index)[1]
    P = np.random.permutation(np.eye(d))
    for k in range(K):
        Bk = Btemp.copy()  # copy so each group's variation starts from the base DAG
        vedge_flag = np.random.rand(L) <= rho * E / (2 * L)
        Bk[avai_index[0][vedge_flag], avai_index[1][vedge_flag]] = 1
        Bk = np.tril(Bk, k=-1)
        B_perm = P.T @ Bk @ P
        B_list.append(B_perm)
        assert ig.Graph.Adjacency(B_perm.tolist()).is_dag()
    return B_list
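The reason _random_acyclic_orientation always yields a DAG: keeping only the strict lower triangle orients every edge from a higher-indexed node to a lower-indexed one, so no cycle can form. A quick check of that property (a sketch):

import numpy as np

rng = np.random.default_rng(0)
B_und = (rng.random((6, 6)) < 0.4).astype(int)
B = np.tril(B_und, k=-1)  # strictly lower-triangular orientation
# a strictly triangular adjacency matrix is nilpotent, hence acyclic
assert not np.linalg.matrix_power(B, 6).any()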
Example No. 46
0
print('\n\n')

print(np.logspace(3,10,50))


#Building matrices
#Let X = np.array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]). Get the diagonal of X, that is, [0, 5, 10].
print('\n\n')


X = np.array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]])
print(np.diag(X))

print('\n\n')
#Create a 2-D array whose diagonal equals [1, 2, 3, 4] and 0's elsewhere.

print(np.diagflat([1,2,3,4]))


print('\n\n')
#Create an array which looks like below. array([[ 0, 0, 0], [ 4, 0, 0], [ 7, 8, 0], [10, 11, 12]])
print(np.tril(np.arange(1,13).reshape(4,3),-1))


print('\n\n')
#Create an array which looks like below. array([[ 1, 2, 3], [ 4, 5, 6], [ 0, 8, 9], [ 0, 0, 12]])
print(np.triu(np.arange(1,13).reshape(4,3),-1))
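One more builder in the same spirit (added here for comparison): np.tri creates the triangular 0/1 mask directly, without starting from an existing array.

print('\n\n')
#Create a 4x3 mask with ones strictly below the diagonal.
print(np.tri(4, 3, -1))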


Example No. 47
0
    def testTrilExecution(self):
        a = arange(24, chunk_size=2).reshape(2, 3, 4)

        t = tril(a)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.tril(np.arange(24).reshape(2, 3, 4))
        np.testing.assert_equal(res, expected)

        t = tril(a, k=1)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.tril(np.arange(24).reshape(2, 3, 4), k=1)
        np.testing.assert_equal(res, expected)

        t = tril(a, k=2)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.tril(np.arange(24).reshape(2, 3, 4), k=2)
        np.testing.assert_equal(res, expected)

        t = tril(a, k=-1)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.tril(np.arange(24).reshape(2, 3, 4), k=-1)
        np.testing.assert_equal(res, expected)

        t = tril(a, k=-2)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.tril(np.arange(24).reshape(2, 3, 4), k=-2)
        np.testing.assert_equal(res, expected)

        a = arange(12, chunk_size=2).reshape(3, 4).tosparse()

        t = tril(a)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.tril(np.arange(12).reshape(3, 4))
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res, expected)

        t = tril(a, k=1)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.tril(np.arange(12).reshape(3, 4), k=1)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res, expected)

        t = tril(a, k=2)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.tril(np.arange(12).reshape(3, 4), k=2)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res, expected)

        t = tril(a, k=-1)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.tril(np.arange(12).reshape(3, 4), k=-1)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res, expected)

        t = tril(a, k=-2)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.tril(np.arange(12).reshape(3, 4), k=-2)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res, expected)
Example No. 48
0
def _Phi(A):
    """Return lower-triangle of matrix and halve the diagonal"""
    A = tril(A)
    A[np.diag_indices_from(A)] *= 0.5
    return A
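One property worth noting: for symmetric A, _Phi(A) + _Phi(A).T reconstructs A, since each off-diagonal entry of the lower triangle appears once per side and the halved diagonal is counted twice. A quick check, assuming _Phi as above:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((5, 5))
A = X + X.T  # symmetric
assert np.allclose(_Phi(A) + _Phi(A).T, A)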
Example No. 49
0
def generate_structure(
    num_nodes: int,
    degree: float,
    graph_type: str = "erdos-renyi",
    w_min: float = 0.5,
    w_max: float = 0.5,
) -> StructureModel:
    """Simulate random DAG with some expected degree.
    Notes:
        graph_type (str):
            - erdos-renyi: constructs a graph such that the probability of any given edge is degree / (num_nodes - 1)
            - barabasi-albert: constructs a scale-free graph from an initial connected graph of (degree / 2) nodes
            - full: constructs a fully-connected graph - degree has no effect
    Args:
        num_nodes: number of nodes
        degree: expected node degree, in + out
        graph_type (str):
            - erdos-renyi: constructs a graph such that the probability of any given edge is degree / (num_nodes - 1)
            - barabasi-albert: constructs a scale-free graph from an initial connected graph of (degree / 2) nodes
            - full: constructs a fully-connected graph - degree has no effect
        w_min (float): min absolute weight of an edge in the graph
        w_max (float): max absolute weight of an edge in the graph
    Raises:
        ValueError: if invalid arguments are provided
    Returns:
        weighted DAG
    """

    if num_nodes < 2:
        raise ValueError("DAG must have at least 2 nodes")

    w_min, w_max = abs(w_min), abs(w_max)

    if w_min > w_max:
        raise ValueError(
            "Absolute minimum weight must be less than or equal to maximum weight: {} > {}".format(
                w_min, w_max
            )
        )

    if graph_type == "erdos-renyi":
        p_threshold = float(degree) / (num_nodes - 1)
        p_edge = (np.random.rand(num_nodes, num_nodes) < p_threshold).astype(float)
        edge_flags = np.tril(p_edge, k=-1)

    elif graph_type == "barabasi-albert":
        m = int(round(degree / 2))
        edge_flags = np.zeros([num_nodes, num_nodes])
        bag = [0]
        for i in range(1, num_nodes):
            dest = np.random.choice(bag, size=m)
            for j in dest:
                edge_flags[i, j] = 1
            bag.append(i)
            bag.extend(dest)

    elif graph_type == "full":  # ignore degree
        edge_flags = np.tril(np.ones([num_nodes, num_nodes]), k=-1)

    else:
        raise ValueError("unknown graph type")

    # randomly permute edges - required because we limited ourselves to lower diagonal previously
    perms = np.random.permutation(np.eye(num_nodes, num_nodes))
    edge_flags = perms.T.dot(edge_flags).dot(perms)

    # random edge weights between w_min, w_max or between -w_min, -w_max
    edge_weights = np.random.uniform(low=w_min, high=w_max, size=[num_nodes, num_nodes])
    edge_weights[np.random.rand(num_nodes, num_nodes) < 0.5] *= -1

    adj_matrix = (edge_flags != 0).astype(float) * edge_weights
    graph = StructureModel(adj_matrix)
    return graph
Example No. 50
0
def _testme(N):
    """Exercise each function using NxN matrices"""
    from time import time
    if N > 1:
        # np.random replaces the scipy.randn/scipy.rand helpers,
        # which were removed from SciPy's top-level namespace
        Sigma = np.cov(np.random.randn(N, 2*N))
        Sigma_dot = np.cov(np.random.randn(N, 2*N))
    elif N == 1:
        Sigma = np.array([[np.random.rand()]])
        Sigma_dot = np.array([[np.random.rand()]])
    else:
        assert(False)
    tic = time()
    L = np.linalg.cholesky(Sigma)
    toc = time() - tic
    print('Running np.linalg.cholesky:')
    print('   Time taken: %0.4f s' % toc)
    tic = time()
    L_ub = tril(_chol_unblocked(Sigma))
    toc = time() - tic
    print('Unblocked chol works: %r'
            % np.all(np.isclose(L, L_ub)))
    print('   Time taken: %0.4f s' % toc)
    tic = time()
    L_bl = tril(_chol_blocked(Sigma))
    toc = time() - tic
    print('Blocked chol works: %r'
            % np.all(np.isclose(L, L_bl)))
    print('   Time taken: %0.4f s' % toc)
    tic = time()
    Ldot = _chol_symbolic_fwd(L, Sigma_dot)
    toc = time() - tic
    hh = 1e-5 # finite-difference step-size
    L2 = np.linalg.cholesky(Sigma + Sigma_dot*hh/2)
    L1 = np.linalg.cholesky(Sigma - Sigma_dot*hh/2)
    Ldot_fd = (L2 - L1) / hh
    print('Symbolic chol_fwd works: %r'
            % np.all(np.isclose(Ldot, Ldot_fd)))
    print('   Time taken: %0.4f s' % toc)
    tic = time()
    Ldot_ub = tril(_chol_unblocked_fwd(L, Sigma_dot))
    toc = time() - tic
    print('Unblocked chol_fwd works: %r'
            % np.all(np.isclose(Ldot, Ldot_ub)))
    print('   Time taken: %0.4f s' % toc)
    tic = time()
    Ldot_bl = tril(_chol_blocked_fwd(L, Sigma_dot))
    toc = time() - tic
    print('Blocked chol_fwd works: %r'
            % np.all(np.isclose(Ldot, Ldot_bl)))
    print('   Time taken: %0.4f s' % toc)
    Lbar = tril(np.random.randn(N, N))
    tic = time()
    Sigma_bar = _chol_symbolic_rev(L, Lbar)
    toc = time() - tic
    Delta1 = _trace_dot(Lbar, Ldot)
    Delta2 = _trace_dot(Sigma_bar, Sigma_dot)
    print('Symbolic chol_rev works: %r'
            % np.all(np.isclose(Delta1, Delta2)))
    print('   Time taken: %0.4f s' % toc)
    tic = time()
    Sigma_bar_ub = _chol_unblocked_rev(L, Lbar)
    toc = time() - tic
    Delta3 = _trace_dot(Sigma_bar_ub, Sigma_dot)
    print('Unblocked chol_rev works: %r'
            % np.all(np.isclose(Delta1, Delta3)))
    print('   Time taken: %0.4f s' % toc)
    tic = time()
    Sigma_bar_bl = _chol_blocked_rev(L, Lbar)
    toc = time() - tic
    Delta4 = _trace_dot(Sigma_bar_bl, Sigma_dot)
    print('Blocked chol_rev works: %r'
            % np.all(np.isclose(Delta1, Delta4)))
    print('   Time taken: %0.4f s' % toc)
    if FORTRAN_COMPILED:
        tic = time()
        Sigma_bar_f = chol_rev(L, Lbar)
        toc = time() - tic
        Delta5 = _trace_dot(Sigma_bar_f, Sigma_dot)
        print('Fortran chol_rev works: %r'
                % np.all(np.isclose(Delta1, Delta5)))
        print('   Time taken: %0.4f s' % toc)
        tic = time()
        Sigma_bar_fub = _chol_unblocked_fortran_rev(L, Lbar)
        toc = time() - tic
        Delta6 = _trace_dot(Sigma_bar_fub, Sigma_dot)
        print('Fortran unblocked chol_rev works: %r'
                % np.all(np.isclose(Delta1, Delta6)))
        print('   Time taken: %0.4f s' % toc)
    else:
        print('Fortran chol_rev not compiled.')
Example No. 51
0
    X = X.drop(var, axis=1)

# remove unused datetime features
X = X.drop([
    'Charge Off Date', 'Funded Date', 'Loan Maturity Date',
    'Loan Paid In Full Date'
],
           axis=1)

# for simplicity, drop the null rows
X = X.dropna()
X = X.reset_index(drop=True)

# check correlations among features
corr = X.corr()
corr.loc[:, :] = np.tril(corr, k=-1)
corr = corr.stack()

print('Large Correlations:')
print(corr[(corr > 0.55) | (corr < -0.55)])
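The tril assignment above zeroes the diagonal and upper triangle in place, so the stacked series lists each feature pair exactly once; the same idiom in isolation (a sketch with made-up data):

import numpy as np
import pandas as pd

demo = pd.DataFrame(np.random.randn(100, 3), columns=['a', 'b', 'c'])
c = demo.corr()
c.loc[:, :] = np.tril(c, k=-1)  # keep one copy of each pair, drop self-correlations
pairs = c.stack()
print(pairs[(pairs > 0.55) | (pairs < -0.55)])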

var = X.var()
print('Small Variance')
print(var[var == 0.0])
print(X.columns)

# drop features with small variance + remove one column of each variable to ensure there is no multicollinearity
X = X.drop(['Other', 'LC-F', 'Rent'], axis=1)

# visualize
cph.fit(X, duration_col='T', event_col='Loan Status')
Example No. 52
0
M = 10  # number of pastures
F = 3  # number of factories
L = 10  # number of milk grades
G = 64  # number of production time slots
N = 10  # maximum milk output per time slot
T = 50  # upper bound on the grade-l milk a factory needs
V = 200  # upper bound on the vehicles per pasture
# production time slots
g = list(range(1, G+1))
# grade levels (the top grades are A++)
l = list(range(1, L+1))
# downgrade cost per ton of milk
C_l = np.array(l)
C_l = np.tile(C_l, (L, 1)).T
C_l = np.tril(C_l)
temp = np.diag(range(1, 11))
C_l = C_l - temp
# factories: index and demand
f = list(range(1, F+1))
# pastures: index and output
m = list(range(1, M+1))
# route travel-time matrix
H_fm = np.random.randint(high=20, low=1, size=(F, M))
# vehicles available at each pasture
V_m = np.random.randint(high=V, low=1, size=(M, 1))
# load capacity of each pasture's vehicles
v_m = {}
# average load per vehicle at each pasture
W_m = {}
for i in range(1, M+1):
Example No. 53
0
def CausalMask(x, params, axis=-1, **kwargs):
    del params, kwargs
    size = x.shape[axis]
    return onp.tril(onp.ones((1, size, size), dtype=x.dtype), k=0)
Example No. 54
0
def causal_mask(size, dtype=np.uint8):
    """Causal attention mask."""
    return onp.tril(onp.ones((1, size, size), dtype=dtype), k=0)
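A sketch of how such a mask is typically applied, assuming causal_mask above and numpy imported as onp: positions where the mask is 0 (future tokens) are pushed to a large negative value before the softmax.

size = 4
scores = onp.random.randn(1, size, size)
masked = onp.where(causal_mask(size), scores, -1e9)
weights = onp.exp(masked - masked.max(axis=-1, keepdims=True))
weights /= weights.sum(axis=-1, keepdims=True)  # each row attends only to the past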
Example No. 55
0
 def testBasic(self):
     rng = np.random.RandomState(0)
     a = np.tril(rng.randn(5, 5))
     b = rng.randn(5, 7)
     for dtype in self.float_types:
         self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
Example No. 56
0
    def sample_trajectory(self,
                          T=10000,
                          sample_freq=10,
                          spring_prob=[
                              1. / 3, 1. / 3, 1. / 3
                          ]):  #### spring_prob=[1. / 2, 0, 1. / 2]
        n = self.n_balls
        assert (T % sample_freq == 0)
        T_save = int(T / sample_freq - 1)
        diag_mask = np.ones((n, n), dtype=bool)
        np.fill_diagonal(diag_mask, 0)
        counter = 0

        # Sample edges
        edges = np.random.choice(
            self.
            _spring_types,  # self._spring_types is an array of relative spring strengths eg. [0., 0.5, 1.]
            size=(self.n_balls, self.n_balls),
            p=spring_prob)  # prob. of each spring type
        # ^ this edges returns an NxN matrix of relative spring strengths
        edges = np.tril(edges) + np.tril(
            edges, -1).T  # this makes the edges matrix symmetric
        np.fill_diagonal(edges, 0)  # remove self loops

        # Initialize location and velocity
        loc = np.zeros((T_save, 2, n))
        vel = np.zeros((T_save, 2, n))
        loc_next = np.random.randn(
            2, n) * self.loc_std  # randn samples from a unit normal dist.
        vel_next = np.random.randn(2, n)
        v_norm = np.sqrt((vel_next**2).sum(axis=0)).reshape(1, -1)
        vel_next = vel_next * self.vel_norm / v_norm
        loc[0, :, :], vel[0, :, :] = self._clamp(loc_next, vel_next)

        # disables division by zero warning, since I fix it with fill_diagonal
        with np.errstate(divide='ignore'):

            forces_size = -self.interaction_strength * edges
            np.fill_diagonal(
                forces_size,
                0)  # self forces are zero (fixes division by zero)
            F = (forces_size.reshape(1, n, n) * np.concatenate(
                (np.subtract.outer(loc_next[0, :], loc_next[0, :]).reshape(
                    1, n, n), np.subtract.outer(loc_next[1, :],
                                                loc_next[1, :]).reshape(
                                                    1, n, n)))).sum(axis=-1)
            F[F > self._max_F] = self._max_F
            F[F < -self._max_F] = -self._max_F

            vel_next += self._delta_T * F
            # run leapfrog
            for i in range(1, T):
                loc_next += self._delta_T * vel_next
                loc_next, vel_next = self._clamp(loc_next, vel_next)

                if i % sample_freq == 0:
                    loc[counter, :, :], vel[counter, :, :] = loc_next, vel_next
                    counter += 1

                forces_size = -self.interaction_strength * edges
                np.fill_diagonal(forces_size, 0)
                # assert (np.abs(forces_size[diag_mask]).min() > 1e-10)

                F = (forces_size.reshape(1, n, n) * np.concatenate(
                    (np.subtract.outer(loc_next[0, :], loc_next[0, :]).reshape(
                        1, n, n),
                     np.subtract.outer(loc_next[1, :], loc_next[1, :]).reshape(
                         1, n, n)))).sum(axis=-1)
                F[F > self._max_F] = self._max_F
                F[F < -self._max_F] = -self._max_F
                vel_next += self._delta_T * F
            # Add noise to observations
            loc += np.random.randn(T_save, 2, self.n_balls) * self.noise_var
            vel += np.random.randn(T_save, 2, self.n_balls) * self.noise_var
            return loc, vel, edges
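The symmetrization idiom used for the edges above mirrors the strict lower triangle onto the upper one; a minimal check of what it produces:

import numpy as np

rng = np.random.default_rng(0)
E = rng.integers(0, 2, (5, 5))
S = np.tril(E) + np.tril(E, -1).T  # lower triangle mirrored upward
np.fill_diagonal(S, 0)  # remove self loops, as in sample_trajectory
assert np.array_equal(S, S.T)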
Example No. 57
0
 def compute_score_full(self, L, tau):
     s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau
     s = np.tril(s, 0) + np.triu(s - float("inf"), 1)
     s = np.exp(s - s.max(1, keepdims=True))
     return s / s.sum(1, keepdims=True)
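The masking idiom above, in isolation: tril(s, 0) keeps scores at or below the diagonal while triu(s - inf, 1) sends everything above it to -inf, so each softmaxed row only places mass on earlier positions. A standalone sketch:

import numpy as np

s = np.random.randn(4, 4)
masked = np.tril(s, 0) + np.triu(np.full_like(s, -np.inf), 1)
p = np.exp(masked - masked.max(1, keepdims=True))
p /= p.sum(1, keepdims=True)
# rows are normalized and carry no mass above the diagonal
assert np.allclose(p.sum(1), 1) and np.allclose(np.triu(p, 1), 0)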
Example No. 58
0
import numpy as np
import scipy.linalg
from chainer_optnet import pivots

A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])

lu, piv = scipy.linalg.lu_factor(A)
p = pivots.pivots_to_perm(piv)
q = pivots.inv_perm(p)

print(piv)
print(p)
print(pivots.pivots_to_perm(pivots.perm_to_pivots(p)) == p)
print(pivots.perm_to_pivots(pivots.pivots_to_perm(piv)) == piv)

L, U = np.tril(lu, k=-1) + np.eye(4), np.triu(lu)

print(A - pivots.permute(L @ U, p))
print(pivots.bpermute(A, p) - L @ U)

print(pivots.permute(A, q) - L @ U)
print(A - pivots.bpermute(L @ U, q))

print(pivots.comp_perm(pivots.inv_perm(p), p))
print(pivots.comp_perm(p, pivots.inv_perm(p)))
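For context on the tril/triu split above: scipy.linalg.lu_factor packs both factors into one matrix, with the unit diagonal of L left implicit. A minimal reconstruction that checks P @ A == L @ U using only the raw pivot array, without the pivots helpers:

lu2, piv2 = scipy.linalg.lu_factor(A.astype(float))
L2, U2 = np.tril(lu2, k=-1) + np.eye(4), np.triu(lu2)
PA = A.astype(float)
for i, j in enumerate(piv2):  # piv2[i]: row i was swapped with row piv2[i]
    PA[[i, j]] = PA[[j, i]]
assert np.allclose(PA, L2 @ U2)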
Example No. 59
0
File: mmfe.py Project: sebsou/lmet
def cren1D(etaC,nulamC,M):
    " fonction crÈneau "
    eta1, eta2 =vstack([0,etaC.reshape(-1,1)]), vstack([etaC.reshape(-1,1),1]) # remise sous forme colonne : .reshape(-1,1) ; remise sous forme ligne : .reshape(1,-1)
    epr,  m_   =nulamC.reshape(-1,1)**2,  -2*pi*(arange(2*M)+1)

    #print (eta1, eta2, epr)
    epr0 = npsum( (eta2-eta1) * epr )
    eprm = dot(epr,ones((1,2*M)))
    m =  m_.reshape(1,-1)

    e1m,  e2m  =1j*dot(eta1,m),  1j*dot(eta2,m)
    epr_m =hstack([0., -1j* npsum( (exp(e2m) -exp(e1m)) *eprm ,0) /m_ ])
    epr_p =hstack([0.,  1j* npsum( (exp(-e2m)-exp(-e1m))*eprm ,0) /m_ ])
    return diagflat(epr0*ones(2*M+1)) + triu( toeplitz(epr_m.real)+1j*toeplitz(epr_m.imag) ) + tril( toeplitz(epr_p.real)+1j*toeplitz(epr_p.imag) )
Example No. 60
0
 def _random_acyclic_orientation(B_und):
     return np.tril(_random_permutation(B_und), k=-1)