Example #1
    def change_pmin_by_innovation(self, x, f):
        Lx, _ = self._gp_innovation_local(x)
        dMdb = Lx
        dVdb = -Lx.dot(Lx.T)
        stoch_changes = dMdb.dot(self.W)
        Mb_new = self.Mb[:, None] + stoch_changes

        Vb_new = self.Vb + dVdb

        Vb_new[np.diag_indices(Vb_new.shape[0])] = np.clip(Vb_new[np.diag_indices(Vb_new.shape[0])], np.finfo(Vb_new.dtype).eps, np.inf)

        Vb_new[np.where((Vb_new < np.finfo(Vb_new.dtype).eps) & (Vb_new > -np.finfo(Vb_new.dtype).eps))] = 0
        try:
            cVb_new = np.linalg.cholesky(Vb_new)
        except np.linalg.LinAlgError:
            try:
                cVb_new = np.linalg.cholesky(Vb_new + 1e-10 * np.eye(Vb_new.shape[0]))
            except np.linalg.LinAlgError:
                try:
                    cVb_new = np.linalg.cholesky(Vb_new + 1e-6 * np.eye(Vb_new.shape[0]))
                except np.linalg.LinAlgError:
                    cVb_new = np.linalg.cholesky(Vb_new + 1e-3 * np.eye(Vb_new.shape[0]))
        f_new = np.dot(cVb_new, self.F.T)
        f_new = f_new[:, :, None]
        Mb_new = Mb_new[:, None, :]
        f_new = Mb_new + f_new
        return self.calc_pmin(f_new)
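
The nested try/except above is a common "jitter" fallback: when the updated covariance is not numerically positive definite, progressively larger multiples of the identity are added until the Cholesky factorization succeeds. A minimal stand-alone sketch of the same idea (the helper name and the jitter schedule are illustrative, not part of the code above):

import numpy as np

def chol_with_jitter(V, jitters=(0.0, 1e-10, 1e-6, 1e-3)):
    # Try progressively larger diagonal jitter until the factorization succeeds.
    for jitter in jitters:
        try:
            return np.linalg.cholesky(V + jitter * np.eye(V.shape[0]))
        except np.linalg.LinAlgError:
            continue
    raise np.linalg.LinAlgError("matrix is not positive definite even with jitter")
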
Example #2
    def predict(self, X_test, **kwargs):

        # Normalize input data to 0 mean and unit std
        X_ = (X_test - self.X_mean) /  self.X_std

        # Get features from the net

        layers = lasagne.layers.get_all_layers(self.network)
        theta = lasagne.layers.get_output(layers[:-1], X_)[-1].eval()

        # Marginalise predictions over hyperparameters of the BLR
        mu = np.zeros([len(self.models), X_test.shape[0]])
        var = np.zeros([len(self.models), X_test.shape[0]])

        for i, m in enumerate(self.models):
            mu[i], var[i] = m.predict(theta)

        # See the algorithm runtime prediction paper by Hutter et al
        # for the derivation of the total variance
        m = np.array([[mu.mean()]])
        v = np.mean(mu ** 2 + var) - m ** 2

        # Clip negative variances and set them to the smallest
        # positive float value
        if v.shape[0] == 1:
            v = np.clip(v, np.finfo(v.dtype).eps, np.inf)
        else:
            v[np.diag_indices(v.shape[0])] = \
                    np.clip(v[np.diag_indices(v.shape[0])],
                            np.finfo(v.dtype).eps, np.inf)
            v[np.where((v < np.finfo(v.dtype).eps) & (v > -np.finfo(v.dtype).eps))] = 0


        return m, v
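
The two lines computing m and v above apply the law of total variance over the hyperparameter samples: the marginal mean is the average of the per-model means, and the marginal variance is E[mu_i^2 + var_i] - m^2 (the average predictive variance plus the variance of the predictive means). A small numpy sketch of just that aggregation step, shown per test point with made-up per-model predictions (the snippet above additionally collapses everything to a single scalar):

import numpy as np

mu = np.array([[1.0, 2.0], [1.2, 1.8]])      # per-model predictive means
var = np.array([[0.10, 0.20], [0.15, 0.25]]) # per-model predictive variances

m = mu.mean(axis=0)                          # marginal mean per test point
v = (mu ** 2 + var).mean(axis=0) - m ** 2    # E[var] + Var[mu] per test point
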
Example #3
def _laplacian_dense(csgraph, normed='geometric', symmetrize=True, scaling_epps=0., renormalization_exponent=1, return_diag=False, return_lapsym = False):
    n_nodes = csgraph.shape[0]
    if symmetrize:
        lap = (csgraph + csgraph.T)/2.
    else:
        lap = csgraph.copy()
    degrees = np.asarray(lap.sum(axis=1)).squeeze()
    di = np.diag_indices( lap.shape[0] )  # diagonal indices

    if normed == 'symmetricnormalized':
        w = np.sqrt(degrees)
        w_zeros = (w == 0)
        w[w_zeros] = 1
        lap /= w
        lap /= w[:, np.newaxis]
        di = np.diag_indices( lap.shape[0] )
        lap[di] -= (1 - w_zeros).astype(lap.dtype)
    if normed == 'geometric':
        w = degrees.copy()     # normalize once symmetrically by d
        w_zeros = (w == 0)
        w[w_zeros] = 1
        lap /= w
        lap /= w[:, np.newaxis]
        w = np.asarray(lap.sum(axis=1)).squeeze() # normalize again asymmetrically
        if return_lapsym:
            lapsym = lap.copy()
        lap /= w[:, np.newaxis]
        lap[di] -= (1 - w_zeros).astype(lap.dtype)
    if normed == 'renormalized':
        w = degrees**renormalization_exponent
        # same as 'geometric' from here on
        w_zeros = (w == 0)
        w[w_zeros] = 1
        lap /= w
        lap /= w[:, np.newaxis]
        w = np.asarray(lap.sum(axis=1)).squeeze() # normalize again asymmetrically
        if return_lapsym:
            lapsym = lap.copy()
        lap /= w[:, np.newaxis]
        lap[di] -= (1 - w_zeros).astype(lap.dtype)
    if normed == 'unnormalized':
        dum = lap[di]-degrees[np.newaxis,:]
        lap[di] = dum[0,:]
    if normed == 'randomwalk':
        lap /= degrees[:,np.newaxis]
        lap -= np.eye(lap.shape[0])

    if scaling_epps > 0.:
        lap *= 4/(scaling_epps**2)

    if return_diag:
        diag = np.array( lap[di] )
        if return_lapsym:
            return lap, diag, lapsym, w
        else: 
            return lap, diag
    elif return_lapsym:
        return lap, lapsym, w
    else:
        return lap
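
For orientation, the 'symmetricnormalized' branch above computes D^{-1/2} W D^{-1/2} and then subtracts one on the diagonal, i.e. the negative of the usual normalized graph Laplacian. A self-contained sketch of that normalization on a tiny dense affinity matrix (the matrix is made up and the code does not call the function above):

import numpy as np

W = np.array([[0., 1., 1.],
              [1., 0., 0.],
              [1., 0., 0.]])                 # symmetric affinity matrix
d = W.sum(axis=1)                            # node degrees
lap = W / np.sqrt(d)[:, None] / np.sqrt(d)[None, :]
lap[np.diag_indices(3)] -= 1.0               # D^{-1/2} W D^{-1/2} - I
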
Example #4
def _gamma1_intermediates(mycc, t1, t2, l1, l2, eris=None):
    doo, dov, dvo, dvv = gccsd_rdm._gamma1_intermediates(mycc, t1, t2, l1, l2)

    if eris is None: eris = mycc.ao2mo()

    nocc, nvir = t1.shape
    bcei = numpy.asarray(eris.ovvv).conj().transpose(3,2,1,0)
    majk = numpy.asarray(eris.ooov).conj().transpose(2,3,0,1)
    bcjk = numpy.asarray(eris.oovv).conj().transpose(2,3,0,1)

    mo_e = eris.mo_energy
    eia = mo_e[:nocc,None] - mo_e[nocc:]
    d3 = lib.direct_sum('ia+jb+kc->ijkabc', eia, eia, eia)

    t3c =(numpy.einsum('jkae,bcei->ijkabc', t2, bcei)
        - numpy.einsum('imbc,majk->ijkabc', t2, majk))
    t3c = t3c - t3c.transpose(0,1,2,4,3,5) - t3c.transpose(0,1,2,5,4,3)
    t3c = t3c - t3c.transpose(1,0,2,3,4,5) - t3c.transpose(2,1,0,3,4,5)
    t3c /= d3

    t3d = numpy.einsum('ia,bcjk->ijkabc', t1, bcjk)
    t3d += numpy.einsum('ai,jkbc->ijkabc', eris.fock[nocc:,:nocc], t2)
    t3d = t3d - t3d.transpose(0,1,2,4,3,5) - t3d.transpose(0,1,2,5,4,3)
    t3d = t3d - t3d.transpose(1,0,2,3,4,5) - t3d.transpose(2,1,0,3,4,5)
    t3d /= d3

    goo = numpy.einsum('iklabc,jklabc->ij', (t3c+t3d).conj(), t3c) * (1./12)
    gvv = numpy.einsum('ijkacd,ijkbcd->ab', t3c+t3d, t3c.conj()) * (1./12)
    doo[numpy.diag_indices(nocc)] -= goo.diagonal()
    dvv[numpy.diag_indices(nvir)] += gvv.diagonal()
    dvo += numpy.einsum('ijab,ijkabc->ck', t2.conj(), t3c) * (1./4)

    return doo, dov, dvo, dvv
Example #5
    def test_eigenvectors(self):
        P = self.bdc.transition_matrix()

        # k==None
        ev = eigvals(P)
        ev = ev[np.argsort(np.abs(ev))[::-1]]
        Dn = np.diag(ev)

        # right eigenvectors
        Rn = eigenvectors(P)
        assert_allclose(np.dot(P,Rn),np.dot(Rn,Dn))
        # left eigenvectors
        Ln = eigenvectors(P, right=False)
        assert_allclose(np.dot(Ln.T,P),np.dot(Dn,Ln.T))
        # orthogonality
        Xn = np.dot(Ln.T, Rn)
        di = np.diag_indices(Xn.shape[0])
        Xn[di] = 0.0
        assert_allclose(Xn,0)

        # k!=None
        Dnk = Dn[:,0:self.k][0:self.k,:]
        # right eigenvectors
        Rn = eigenvectors(P, k=self.k)
        assert_allclose(np.dot(P,Rn),np.dot(Rn,Dnk))
        # left eigenvectors
        Ln = eigenvectors(P, right=False, k=self.k)
        assert_allclose(np.dot(Ln.T,P),np.dot(Dnk,Ln.T))
        # orthogonality
        Xn = np.dot(Ln.T, Rn)
        di = np.diag_indices(self.k)
        Xn[di] = 0.0
        assert_allclose(Xn,0)
Example #6
def test_diag_indices():
    di = diag_indices(4)
    a = array([[1, 2, 3, 4],
               [5, 6, 7, 8],
               [9, 10, 11, 12],
               [13, 14, 15, 16]])
    a[di] = 100
    yield (assert_array_equal, a,
           array([[100,   2,   3,   4],
                  [  5, 100,   7,   8],
                  [  9,  10, 100,  12],
                  [ 13,  14,  15, 100]]))

    # Now, we create indices to manipulate a 3-d array:
    d3 = diag_indices(2, 3)

    # And use it to set the diagonal of a zeros array to 1:
    a = zeros((2, 2, 2),int)
    a[d3] = 1
    yield (assert_array_equal, a,
           array([[[1, 0],
                   [0, 0]],

                  [[0, 0],
                   [0, 1]]]) )
Example #7
def _make_rdm1(mycc, d1, with_frozen=True, ao_repr=False):
    '''dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>

    The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
    The contraction between 1-particle Hamiltonian and rdm1 is
    E = einsum('pq,qp', h1, rdm1)
    '''
    doo, dov, dvo, dvv = d1
    nocc, nvir = dov.shape
    nmo = nocc + nvir
    dm1 = numpy.empty((nmo,nmo), dtype=doo.dtype)
    dm1[:nocc,:nocc] = doo + doo.conj().T
    dm1[:nocc,nocc:] = dov + dvo.conj().T
    dm1[nocc:,:nocc] = dm1[:nocc,nocc:].conj().T
    dm1[nocc:,nocc:] = dvv + dvv.conj().T
    dm1[numpy.diag_indices(nocc)] += 2

    if with_frozen and not (mycc.frozen == 0 or mycc.frozen is None):
        nmo = mycc.mo_occ.size
        nocc = numpy.count_nonzero(mycc.mo_occ > 0)
        rdm1 = numpy.zeros((nmo,nmo), dtype=dm1.dtype)
        rdm1[numpy.diag_indices(nocc)] = 2
        moidx = numpy.where(mycc.get_frozen_mask())[0]
        rdm1[moidx[:,None],moidx] = dm1
        dm1 = rdm1

    if ao_repr:
        mo = mycc.mo_coeff
        dm1 = lib.einsum('pi,ij,qj->pq', mo, dm1, mo.conj())
    return dm1
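
The contraction convention stated in the docstring, E = einsum('pq,qp', h1, rdm1), can be sanity-checked with a toy one-electron Hamiltonian: for a closed-shell reference density (2 on the occupied diagonal, zero elsewhere) the energy reduces to twice the sum of the occupied diagonal elements of h1. A small illustrative check (h1 and the occupation pattern are made up):

import numpy as np

nocc, nmo = 2, 4
h1 = np.random.rand(nmo, nmo)
h1 = h1 + h1.T                        # symmetric one-electron Hamiltonian
dm1 = np.zeros((nmo, nmo))
dm1[np.diag_indices(nocc)] = 2.0      # closed-shell reference occupations

E = np.einsum('pq,qp', h1, dm1)
assert np.isclose(E, 2.0 * h1[np.diag_indices(nocc)].sum())
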
Example #8
File: gp.py Project: Imdrail/GPy
    def predict_wishard_embedding(self, Xnew, kern=None, mean=True, covariance=True):
        """
        Predict the Wishart embedding G of the GP. This is the density of the
        input of the GP defined by the probabilistic function mapping f.
        G = J_mean.T*J_mean + output_dim*J_cov.

        :param array-like Xnew: The points at which to evaluate the magnification.
        :param :py:class:`~GPy.kern.Kern` kern: The kernel to use for the magnification.

        Supplying only a part of the learning kernel gives insights into the density
        of the specific kernel part of the input function. E.g. one can see how dense the
        linear part of a kernel is compared to the non-linear part etc.
        """
        if kern is None:
            kern = self.kern

        mu_jac, var_jac = self.predict_jacobian(Xnew, kern, full_cov=False)
        mumuT = np.einsum('iqd,ipd->iqp', mu_jac, mu_jac)
        Sigma = np.zeros(mumuT.shape)
        if var_jac.ndim == 3:
            Sigma[(slice(None), )+np.diag_indices(Xnew.shape[1], 2)] = var_jac.sum(-1)
        else:
            Sigma[(slice(None), )+np.diag_indices(Xnew.shape[1], 2)] = self.output_dim*var_jac
        G = 0.
        if mean:
            G += mumuT
        if covariance:
            G += Sigma
        return G
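
In terms of the Jacobian statistics returned by predict_jacobian, the quantity assembled above is G = J_mean J_mean^T + output_dim * diag(J_var) per input point. A plain-numpy sketch of that assembly with stand-in arrays (only the shapes are assumed: N points, Q input dimensions, D output dimensions):

import numpy as np

N, Q, D = 5, 3, 2
mu_jac = np.random.randn(N, Q, D)     # stand-in for the mean Jacobian
var_jac = np.random.rand(N, Q)        # stand-in for its (diagonal) variance

G = np.einsum('iqd,ipd->iqp', mu_jac, mu_jac)            # J_mean J_mean^T
G[(slice(None),) + np.diag_indices(Q)] += D * var_jac    # + output_dim * J_cov
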
Example #9
def compute_genetic_distance(X, **kwargs):
    """Given genotype matrix X, returns pairwise genetic distance between individuals
    using the estimator described in Theorem 1.

    Args:
        X: n * p matrix of 0/1/2/nan, n is #individuals, p is #SNPs
    """

    n, p = X.shape
    missing = np.isnan(X)
    col_sums = np.nansum(X, axis=0)
    col_counts = np.sum(~missing, axis=0)
    mu_hat = col_sums / 2. / col_counts     # p dimensional

    eta0_hat = np.nansum(X**2 - X, axis=0) / 2. / col_counts - mu_hat**2

    X_tmp = X/2.
    X_tmp[missing] = 0
    non_missing = np.array(~missing, dtype=float)

    X_shifted = X_tmp - mu_hat
    gdm_squared = 2. * np.mean(eta0_hat) - 2. * np.dot(X_shifted, X_shifted.T) / np.dot(non_missing, non_missing.T)
    gdm_squared[np.diag_indices(n)] = 0.

    if len(gdm_squared[gdm_squared < 0]) > 0:
        # shift all entries by the smallest amount that makes them non-negative
        shift = - np.min(gdm_squared[gdm_squared < 0])
        gdm_squared += shift
        gdm_squared[np.diag_indices(n)] = 0.

    gdm = np.sqrt(np.maximum(gdm_squared, 0.))

    return gdm
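
A minimal usage sketch, assuming compute_genetic_distance above is in scope; the genotype matrix is a made-up 0/1/2 matrix with one entry missing (np.nan):

import numpy as np

X = np.array([[0., 1., 2., 0.],
              [2., np.nan, 0., 1.],
              [1., 1., 2., 2.]])      # 3 individuals, 4 SNPs

gdm = compute_genetic_distance(X)
print(gdm.shape)                      # (3, 3): symmetric, zero diagonal
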
Example #10
def test_calculate2():
    # Two mutations in one cluster, two in second
    c = np.zeros((4,2))
    c[0:2,0] = 1
    c[2:4,1] = 1
    c = np.dot(c,c.T)

    # Identical
    assert round(calculate2(c,c), 2) == 1.00

    #Inverted
    c2 = np.abs(c-1)
    c2[np.diag_indices(4)] = 1
    assert round(calculate2(c,c2), 2) == -0.68

    # Differences first 3 SSMs in first cluster, 4th ssm in second cluster
    c3 = np.zeros((4,2))
    c3[0:3,0] = 1
    c3[3:4,1] = 1
    c3 = np.dot(c3,c3.T)
    assert round(calculate2(c,c3), 2) == -0.20

    # Metric doesn't count the diagonal
    c4 = c+0
    c4[np.diag_indices(4)] = 0
    assert round(calculate2(c,c4), 2) == 0.74

    c2[np.diag_indices(4)] = 0
    assert round(calculate2(c,c2), 2) == -0.23
Example #11
    def kNN_graph(self, k, metric, mutual=False):
#        self.latex = []
        nn = NearestNeighbors(k, algorithm="brute", metric=metric, n_jobs=-1).fit(self.X)
        UAM = nn.kneighbors_graph(self.X).toarray() #unweighted adjacency matrix
        m = UAM.shape[0]
        self.W = np.zeros((m, m)) # (weighted) adjacency matrix
        self.D = np.zeros((m, m)) #degree matrix
        if mutual == False:
            if self.full_calculated:
                indices = np.where(UAM == 1)
                self.W[indices] = self.full_W[indices]
                self.D[np.diag_indices(m)] = np.sum(self.W, 1)
            else:
                for i in range(m):
                    for j in range(m):
                        if UAM[i,j] == 1:
                            sim = self.s(self.X[i], self.X[j], self.d)
                            self.W[i,j] = sim
                            self.D[i,i] += sim
        else:
            if self.full_calculated:
                indices = np.where(np.logical_and(UAM == 1, UAM.T == 1).astype(int) == 1)
                self.W[indices] = self.full_W[indices]
                self.D[np.diag_indices(m)] = np.sum(self.W != 0, 1)
            else:
                for i in range(m):
                    for j in range(m):
                        if UAM[i,j] == 1 and UAM[j,i] == 1:
                            sim = self.s(self.X[i], self.X[j], self.d)
                            self.W[i,j] = sim
                            self.D[i,i] += sim
        self.W = np.nan_to_num(self.W)
        self.graph = "kNN graph, k = " + str(k) + ", mutual:" + str(mutual)
Example #12
def test_naf_layer_full():
    batch_size = 2
    for nb_actions in (1, 3):
        # Construct single model with NAF as the only layer, hence it is fully deterministic
        # since no weights are used, which would be randomly initialized.
        L_flat_input = Input(shape=((nb_actions * nb_actions + nb_actions) // 2,))
        mu_input = Input(shape=(nb_actions,))
        action_input = Input(shape=(nb_actions,))
        x = NAFLayer(nb_actions, mode='full')([L_flat_input, mu_input, action_input])
        model = Model(inputs=[L_flat_input, mu_input, action_input], outputs=x)
        model.compile(loss='mse', optimizer='sgd')
        
        # Create random test data.
        L_flat = np.random.random((batch_size, (nb_actions * nb_actions + nb_actions) // 2)).astype('float32')
        mu = np.random.random((batch_size, nb_actions)).astype('float32')
        action = np.random.random((batch_size, nb_actions)).astype('float32')

        # Perform reference computations in numpy since these are much easier to verify.
        L = np.zeros((batch_size, nb_actions, nb_actions)).astype('float32')
        LT = np.copy(L)
        for l, l_T, l_flat in zip(L, LT, L_flat):
            l[np.tril_indices(nb_actions)] = l_flat
            l[np.diag_indices(nb_actions)] = np.exp(l[np.diag_indices(nb_actions)])
            l_T[:, :] = l.T
        P = np.array([np.dot(l, l_T) for l, l_T in zip(L, LT)]).astype('float32')
        A_ref = np.array([np.dot(np.dot(a - m, p), a - m) for a, m, p in zip(action, mu, P)]).astype('float32')
        A_ref *= -.5

        # Finally, compute the output of the net, which should be identical to the previously
        # computed reference.
        A_net = model.predict([L_flat, mu, action]).flatten()
        assert_allclose(A_net, A_ref, rtol=1e-5)
Example #13
def beam_search(dec,state,y,data,beam_width,mydict_inv):  
    beam_width=beam_width
    xp=cuda.cupy
    batchsize=data.shape[0]
    vocab_size=len(mydict_inv)
    topk=20
    route = np.zeros((batchsize,beam_width,50)).astype(np.int32)
    
    for j in range(50):
        if j == 0:
            y = Variable(xp.array(np.argmax(y.data.get(), axis=1)).astype(xp.int32))
            state,y = dec(y, state, train=False)
            h=state['h1'].data
            c=state['c1'].data
            h=xp.tile(h.reshape(batchsize,1,-1), (1,beam_width,1))
            c=xp.tile(c.reshape(batchsize,1,-1), (1,beam_width,1))
            ptr=F.log_softmax(y).data.get()
            pred_total_city = np.argsort(ptr)[:,::-1][:,:beam_width]
            pred_total_score = np.sort(ptr)[:,::-1][:,:beam_width]
            route[:,:,j] = pred_total_city
            pred_total_city=pred_total_city.reshape(batchsize,beam_width,1)
        else:
            pred_next_score=np.zeros((batchsize,beam_width,topk))
            pred_next_city=np.zeros((batchsize,beam_width,topk)).astype(np.int32)
            score2idx=np.zeros((batchsize,beam_width,topk)).astype(np.int32)
            for b in range(beam_width):
                state={'c1':Variable(c[:,b,:]), 'h1':Variable(h[:,b,:])}
                cur_city = xp.array([pred_total_city[i,b,j-1] for i in range(batchsize)]).astype(xp.int32)
                state,y = dec(cur_city,state, train=False)
                h[:,b,:]=state['h1'].data
                c[:,b,:]=state['c1'].data
                ptr=F.log_softmax(y).data.get()
                pred_next_score[:,b,:]=np.sort(ptr, axis=1)[:,::-1][:,:topk]
                pred_next_city[:,b,:]=np.argsort(ptr, axis=1)[:,::-1][:,:topk]

            h=F.stack([h for i in range(topk)], axis=2).data
            c=F.stack([c for i in range(topk)], axis=2).data
            
            pred_total_city = np.tile(route[:,:,:j],(1,1,topk)).reshape(batchsize,beam_width,topk,j)
            pred_next_city = pred_next_city.reshape(batchsize,beam_width,topk,1)
            pred_total_city = np.concatenate((pred_total_city,pred_next_city),axis=3)

            pred_total_score = np.tile(pred_total_score.reshape(batchsize,beam_width,1),(1,1,topk)).reshape(batchsize,beam_width,topk,1)
            pred_next_score = pred_next_score.reshape(batchsize,beam_width,topk,1)
            pred_total_score += pred_next_score

            idx = pred_total_score.reshape(batchsize,beam_width * topk).argsort(axis=1)[:,::-1][:,:beam_width]

            pred_total_city = pred_total_city[:,idx//topk, np.mod(idx,topk), :][np.diag_indices(batchsize,ndim=2)].reshape(batchsize,beam_width,j+1)
            pred_total_score = pred_total_score[:,idx//topk, np.mod(idx,topk), :][np.diag_indices(batchsize,ndim=2)].reshape(batchsize,beam_width,1)
            h = h[:,idx//topk, np.mod(idx,topk), :][np.diag_indices(batchsize,ndim=2)].reshape(batchsize,beam_width,-1)
            c = c[:,idx//topk, np.mod(idx,topk), :][np.diag_indices(batchsize,ndim=2)].reshape(batchsize,beam_width,-1)

            route[:,:,:j+1] =pred_total_city
            if (pred_total_city[:,:,j] == 15).all():
                break


    return route[:,0,:j+1].tolist()
Example #14
 def derivative_of_marginalLikelihood(self, hyp):
     """
     This method calculates the derivative of marginal likelihood.
     
     You may refer to this code to see what methods in numpy is useful
     
     while you are implementing other functions.
     
     Do not modify this method.
     """
     trainingData = self.trainingData
     trainingLabels = self.trainingLabels
     c = len(self.legalLabels)
     [n,d] = self.trainingShape
     hyp = np.reshape(hyp, self.hypSize)
     
     [mode,_] = self.findMode(trainingData, trainingLabels, hyp)
     [t,_] = self.trainingLabels2t(trainingLabels)
       
     Ks = self.calculateCovariance(trainingData, hyp)
     [_,[E, M, R, b, totpi, K],_] = self.calculateIntermediateValues(t, mode, Ks)
       
     MRE = np.linalg.solve(M,R.T.dot(E))
     MMRE = np.linalg.solve(M.T,MRE)
     KWinvinv = E-E.dot(R.dot(MMRE))
       
     KinvWinv = K-K.dot(KWinvinv.dot(K))
     partitioned_KinvWinv = np.transpose(np.array(np.split(np.array(np.split(KinvWinv, c)),c,2)),[2,3,1,0])
       
     s2 = np.zeros([n,c])
     for i in range(n):
         pi_n = softmax(np.reshape(mode,[c,n])[:,i:i+1].T).T
         pipj = pi_n.dot(pi_n.T)
         pi_3d = np.zeros([c,c,c])
         pi_3d[np.diag_indices(c,3)] = pi_n.ravel()
         pipjpk = np.tensordot(pi_n,np.reshape(pipj,(1,c,c)),(1,0))
         pipj_3d = np.zeros([c,c,c])
         pipj_3d[np.diag_indices(c)] = pipj
         W_3d = pi_3d + 2 * pipjpk - pipj_3d - np.transpose(pipj_3d,[2,1,0]) - np.transpose(pipj_3d,[1,2,0])
         s2[i,:] = -0.5*np.trace(partitioned_KinvWinv[i,i].dot(W_3d))
           
     b_rs = np.reshape(b, [c,n])
     dZ = np.zeros(hyp.shape)
     for j in range(2):
         cs = []
         zeroCs = [np.zeros([n,n]) for i in range(c)]
         for i in range(c):
             C = self.covARD(hyp[i,:],trainingData,None,j)
             dZ[i,j] = 0.5*b_rs[i,:].T.dot(C.dot(b_rs[i,:]))
             zeroCs[i] = C
             cs.append(self.block_diag(zeroCs))
             zeroCs[i] = np.zeros([n,n])
             
         for i in range(c):
             dd = cs[i].dot(t-totpi)
             s3 = dd - K.dot(KWinvinv.dot(dd))
             dZ[i,j] +=  - 0.5 * np.trace(KWinvinv.dot(cs[i])) + s2.T.ravel().dot(s3) # 
               
     return -dZ.ravel()
Example #15
def main():
    order = 4
    cov, inv_cov, inv_cov_est, t_direct, t_patches = get_mats(order)
    header = '=== inv_scale_length:{:.1f}: ==='
    header = header.format(inv_scale_length)
    print(header, file=logf)
    print('Time to invert directly:   {:.4f} s'.format(t_direct), file=logf)
    maybe_zero = inv_cov_est - inv_cov_est.T
    for i in range(npix):
        for j in range(npix):
            print(maybe_zero[i][j])
    return 0
    #patch_identity.shape = shape

    t2 = time()
    print('Time to invert by patches: {:.4f} s'.format(t2-t1), file=logf)
    I = np.dot(inv_cov, cov)
    I_est = np.dot(inv_cov_est, cov)

    t3 = time()
    print('Time to calculate checks:  {:.4f} s'.format(t3-t2), file=logf)

    I_diag = I[np.diag_indices(I.shape[0])]
    I_diag_dev = np.max(np.abs(I_diag-1.))
    I[np.diag_indices(I.shape[0])] = 0.
    I_off_diag_dev = np.max(np.abs(I))

    print('C^-1 C:', file=logf)
    print('  * max. abs. dev. along diagonal: {:.3g}'.format(I_diag_dev), file=logf)
    print('  * max. abs. dev. off diagonal: {:.3g}'.format(I_off_diag_dev), file=logf)

    I_diag = I_est[np.diag_indices(I_est.shape[0])]
    I_diag_dev = np.max(np.abs(I_diag-1.))
    I_est[np.diag_indices(I_est.shape[0])] = 0.
    I_off_diag_dev = np.max(np.abs(I_est))

    print('(C^-1)_{est} C:', file=logf)
    print('  * max. abs. dev. along diagonal: {:.3g}'.format(I_diag_dev), file=logf)
    print('  * max. abs. dev. off diagonal: {:.3g}'.format(I_off_diag_dev), file=logf)

    #plt.imsave('dist.png', dist, cmap=colors.inferno_r)
    row2fig('dist.png', dist)
    plt.imsave('dist_matrix.png', dist_mat, cmap=colors.inferno_r)
    plt.imsave('cov.png', cov, cmap=colors.inferno_r)
    vmax = np.max(np.abs(inv_cov))
    plt.imsave('inv_cov.png', inv_cov, cmap='coolwarm_r', vmin=-vmax, vmax=vmax)
    vmax = np.max(np.abs(inv_cov_est))
    plt.imsave('inv_cov_est.png', inv_cov_est, cmap='coolwarm_r', vmin=-vmax, vmax=vmax)
    row2fig('inv_cov_est_row.png', inv_cov_est[1387])
    with open('inv_cov_est_diag.txt', 'w') as icer:
        for i in range(npix):
            print(np.max(inv_cov_est[i]), file=icer)
    with open('inv_cov_diag.txt', 'w') as icer:
        for i in range(npix):
            print(np.max(inv_cov[i]), file=icer)
    #plt.imsave('patches.png', patch_identity, cmap=colors.inferno_r)
    row2fig('patches.png', patch_identity)

    return 0
Example #16
 def convert(self, network):
     """ Generates an n-dimensional connectivity matrix. """
     if not isinstance(network, NeuralNetwork):
         network = NeuralNetwork(network)
     
     os = np.atleast_1d(self.substrate_shape)
     # Unpack the genotype
     w, f = network.cm.copy(), network.node_types[:]
     
     # Create substrate
     if len(os) == 1:
         cm = np.mgrid[-1:1:os[0]*1j,-1:1:os[0]*1j].transpose((1,2,0))
     elif len(os) == 2:
         cm = np.mgrid[-1:1:os[0]*1j,-1:1:os[1]*1j,-1:1:os[0]*1j,-1:1:os[1]*1j].transpose(1,2,3,4,0)
     else:
         raise NotImplementedError("3+D substrates not supported yet.")
     # Insert a bias
     cm = np.insert(cm, 0, 1.0, -1)
     # Check if the genotype has enough weights
     if w.shape[0] < cm.shape[-1]:
         raise Exception("Genotype weight matrix is too small (%s)" % (w.shape,) )
     # Append zeros
     n_elems = len(f)
     nvals = np.zeros(cm.shape[:-1] + (n_elems - cm.shape[-1],))
     cm = np.concatenate((cm, nvals), -1)
     shape = cm.shape
     
     # Fix the input elements
     frozen = len(os) * 2 + 1
     w[:frozen] = 0.0
     w[np.diag_indices(frozen, 2)] = 1.0
     f[:frozen] = [lambda x: x] * frozen
     w[np.diag_indices(n_elems)] = (1 - self.recursion) * w[np.diag_indices(n_elems)] + self.recursion
     
     # Compute the reaction
     self._steps = []
     laplacian = np.empty_like(cm[..., frozen:])
     kernel = self.diffusion * np.array([1.,2.,1.])
     for _ in range(self.reaction_steps):
         cm = np.dot(w, cm.reshape((-1, n_elems)).T)
         cm = np.clip(cm, self.cm_range[0], self.cm_range[1])
         for el in range(cm.shape[0]):
             cm[el,:] = f[el](cm[el,:])
         cm = cm.T.reshape(shape)            
         # apply diffusion
         laplacian[:] = 0.0
         for ax in range(cm.ndim - 1):
             laplacian += scipy.ndimage.filters.convolve1d(cm[..., frozen:], kernel, axis=ax, mode='constant')
         cm[..., frozen:] += laplacian
         self._steps.append(cm[...,-1])
         
     # Return the values of the last element (indicating connectivity strength)
     output = cm[..., -1]
     # Build a network object
     net = NeuralNetwork().from_matrix(output)
     if self.sandwich:
         net.make_sandwich()
     return net
Example #17
def update_amps(cc, t1, t2, eris):
    assert(isinstance(eris, _PhysicistsERIs))
    nocc, nvir = t1.shape
    fock = eris.fock

    fov = fock[:nocc,nocc:]
    mo_e_o = eris.mo_energy[:nocc]
    mo_e_v = eris.mo_energy[nocc:] + cc.level_shift

    tau = imd.make_tau(t2, t1, t1)

    Fvv = imd.cc_Fvv(t1, t2, eris)
    Foo = imd.cc_Foo(t1, t2, eris)
    Fov = imd.cc_Fov(t1, t2, eris)
    Woooo = imd.cc_Woooo(t1, t2, eris)
    Wvvvv = imd.cc_Wvvvv(t1, t2, eris)
    Wovvo = imd.cc_Wovvo(t1, t2, eris)

    # Move energy terms to the other side
    Fvv[np.diag_indices(nvir)] -= mo_e_v
    Foo[np.diag_indices(nocc)] -= mo_e_o

    # T1 equation
    t1new  =  einsum('ie,ae->ia', t1, Fvv)
    t1new += -einsum('ma,mi->ia', t1, Foo)
    t1new +=  einsum('imae,me->ia', t2, Fov)
    t1new += -einsum('nf,naif->ia', t1, eris.ovov)
    t1new += -0.5*einsum('imef,maef->ia', t2, eris.ovvv)
    t1new += -0.5*einsum('mnae,mnie->ia', t2, eris.ooov)
    t1new += fov.conj()

    # T2 equation
    Ftmp = Fvv - 0.5*einsum('mb,me->be', t1, Fov)
    tmp = einsum('ijae,be->ijab', t2, Ftmp)
    t2new = tmp - tmp.transpose(0,1,3,2)
    Ftmp = Foo + 0.5*einsum('je,me->mj', t1, Fov)
    tmp = einsum('imab,mj->ijab', t2, Ftmp)
    t2new -= tmp - tmp.transpose(1,0,2,3)
    t2new += np.asarray(eris.oovv).conj()
    t2new += 0.5*einsum('mnab,mnij->ijab', tau, Woooo)
    t2new += 0.5*einsum('ijef,abef->ijab', tau, Wvvvv)
    tmp = einsum('imae,mbej->ijab', t2, Wovvo)
    tmp -= -einsum('ie,ma,mbje->ijab', t1, t1, eris.ovov)
    tmp = tmp - tmp.transpose(1,0,2,3)
    tmp = tmp - tmp.transpose(0,1,3,2)
    t2new += tmp
    tmp = einsum('ie,jeba->ijab', t1, np.array(eris.ovvv).conj())
    t2new += (tmp - tmp.transpose(1,0,2,3))
    tmp = einsum('ma,ijmb->ijab', t1, np.asarray(eris.ooov).conj())
    t2new -= (tmp - tmp.transpose(0,1,3,2))

    eia = mo_e_o[:,None] - mo_e_v
    eijab = lib.direct_sum('ia,jb->ijab', eia, eia)
    t1new /= eia
    t2new /= eijab

    return t1new, t2new
Example #18
def test_general_metric(seed=1234, N=2, ndim=3):
    np.random.seed(seed)

    _general_metric(np.eye(ndim), N=N, ndim=ndim)

    L = np.random.randn(ndim, ndim)
    L[np.diag_indices(ndim)] = np.exp(L[np.diag_indices(ndim)])
    L[np.triu_indices(ndim, 1)] = 0.0
    metric = np.dot(L, L.T)
    _general_metric(metric, N=N, ndim=ndim)
Example #19
def parse_dataFrame(df, df_name):
    """
    removes overlapping cells by x, y, and source image. 
    """

    x = np.array(df['x'])
    y = np.array(df['y'])
    i = np.array(df['image'])

    # create linkage table 
    dx = x[:, np.newaxis] - x
    dy = y[:, np.newaxis] - y
    di = (i[:, np.newaxis] != i)*1e9

    # set diagonal of linkage table to large number
    dx[np.diag_indices(len(dx))] = 1e9
    dy[np.diag_indices(len(dy))] = 1e9
    di[np.diag_indices(len(di))] = 1e9

    # get absolute values
    dx = np.abs(dx)
    dy = np.abs(dy)

    # sum vector of x, y, and image
    d = dx + dy + di

    # set variable = to min of 0 for vector summation
    b = d.min(0)

    # create array with zeros of length b 
    exclude = np.zeros(len(b))

    # iterate through index of length b 
    for i in range(len(b)):
        # skip cells that are already marked as overlapping
        if exclude[i]:
            continue
        # mark every cell whose summed distance to cell i is below 20 as overlapping
        exclude[(d[i] < 20).nonzero()] = True
        
     
    d = {}

    # create overlap frame
    df_overlap = df[exclude == True]
    df_overlap = df_overlap.reset_index(drop=True)
    

    # create non_overlap frame
    df_nonoverlap = df[exclude == False]
    df_nonoverlap = df_nonoverlap.reset_index(drop=True)
    d['{}_non_overlap'.format(df_name)] = df_nonoverlap

    return d
Example #20
        def on_stiffness(self, factor):

            diag3 = np.diag_indices( 3 )
            diag6 = np.diag_indices( 6 )            

            # diagonal
            self.stiffness[ diag6 ] = - factor * self.k

            # off-diagonal
            self.stiffness[ 3:, :3 ][diag3] = factor * self.k
            self.stiffness[ :3, 3: ][diag3] = factor * self.k            
Example #21
def do_test(num):
    #x_coords,y_coords = np.loadtxt('cats/src_336.tab',comments='#',usecols=(0,1),unpack=True) # read in phot results as arrays
    x_coords = np.array(np.random.rand(num)*500.,dtype=np.float32)
    y_coords = np.array(np.random.rand(num)*500.,dtype=np.float32)

    j_t1 = time.time()
    length = len(y_coords)
    a = range(length)
    
    j_min_distances = []
    
    for i in a:
        indiv_dist = []
        for j in a:
            if i != j:
                indiv_dist.append(np.sqrt((x_coords[i]-x_coords[j])**2+(y_coords[i]-y_coords[j])**2))
        j_min_distances.append(np.amin(indiv_dist))
                
    j_t2 = time.time()
    
    ####################
    a_t1 =time.time()
    
    '''the padding is necessary to allow us to take the transpose.
    xpad is just [x]'''
    xpad = x_coords[None,:]
    
    '''xpad - xpad.T produces a 2D array with the difference between every
    combination of points in xpad. We then wrap it in a masked array.
    It's a tad bit faster to square these now.'''
    xdiff = np.ma.array(xpad - xpad.T)**2
    '''now mask out the diagonal values because those will be zero'''
    xdiff[np.diag_indices(xdiff.shape[0])] = np.ma.masked

    ypad = y_coords[None, :]
    ydiff = np.ma.array(ypad - ypad.T)**2
    ydiff[np.diag_indices(ydiff.shape[0])] = np.ma.masked

    '''find the minima. The .data makes this a standard numpy
    array rather than a masked array'''
    a_min_distances = np.min(np.sqrt(xdiff + ydiff),axis=1).data
    
    a_t2 = time.time()

    #####################
    
    avgdiff = np.mean(np.abs(j_min_distances - a_min_distances))
    stddiff = np.std(np.abs(j_min_distances - a_min_distances))
    
    print "Loop method took {:4.4f} seconds\nNumpy method took {:4.4f} seconds".format(j_t2 - j_t1, a_t2 - a_t1)
    print "Results vary by an average of {:4.4f} with a std of {:4.4f}".format(avgdiff,stddiff)
    
    return 
Example #22
    def predict(self, X, **kwargs):
        r"""
        Returns the predictive mean and variance of the objective function
        at X average over all hyperparameter samples.
        The mean is computed by:
        :math \mu(x) = \frac{1}{M}\sum_{i=1}^{M}\mu_m(x)
        And the variance by:
        :math \sigma^2(x) = \frac{1}{M}\sum_{i=1}^{M}(\sigma^2_m(x) + \mu_m(x)^2) - \mu^2

        Parameters
        ----------
        X: np.ndarray (N, D)
            Input test points

        Returns
        ----------
        np.array(N,1)
            predictive mean
        np.array(N,1)
            predictive variance

        """

        # For EnvES we transform s to (1 - s)^2
        if self.basis_func is not None:
            X_test = deepcopy(X)
            X_test[:, self.dim] = self.basis_func(X_test[:, self.dim])
        else:
            X_test = X

        mu = np.zeros([self.n_hypers, X_test.shape[0]])
        var = np.zeros([self.n_hypers, X_test.shape[0]])
        for i, model in enumerate(self.models):
            mu[i], var[i] = model.predict(X_test)

        # See the algorithm runtime prediction paper by Hutter et al
        # for the derivation of the total variance
        m = np.array([[mu.mean()]])
        v = np.mean(mu ** 2 + var) - m ** 2

        # Clip negative variances and set them to the smallest
        # positive float value
        if v.shape[0] == 1:
            v = np.clip(v, np.finfo(v.dtype).eps, np.inf)
        else:
            v[np.diag_indices(v.shape[0])] = \
                    np.clip(v[np.diag_indices(v.shape[0])],
                            np.finfo(v.dtype).eps, np.inf)
            v[np.where((v < np.finfo(v.dtype).eps) & (v > -np.finfo(v.dtype).eps))] = 0

        return m, v
Example #23
    def predict(self, X, full_cov=False):
        if self.m is None:
            print("ERROR: Model needs to be trained first.")
            return None

        mean, var = self.m.predict(X, full_cov=full_cov)

        if not full_cov:
            return mean[:, 0], np.clip(var[:, 0], np.finfo(var.dtype).eps, np.inf)
            #return mean
        else:
            var[np.diag_indices(var.shape[0])] = np.clip(var[np.diag_indices(var.shape[0])], np.finfo(var.dtype).eps, np.inf)
            var[np.where((var < np.finfo(var.dtype).eps) & (var > -np.finfo(var.dtype).eps))] = 0
            return mean[:, 0], var
Example #24
def _make_rdm1(mycc, d1, with_frozen=True, ao_repr=False):
    doo, dOO = d1[0]
    dov, dOV = d1[1]
    dvo, dVO = d1[2]
    dvv, dVV = d1[3]
    nocca, nvira = dov.shape
    noccb, nvirb = dOV.shape
    nmoa = nocca + nvira
    nmob = noccb + nvirb

    dm1a = numpy.empty((nmoa,nmoa), dtype=doo.dtype)
    dm1a[:nocca,:nocca] = doo + doo.conj().T
    dm1a[:nocca,nocca:] = dov + dvo.conj().T
    dm1a[nocca:,:nocca] = dm1a[:nocca,nocca:].conj().T
    dm1a[nocca:,nocca:] = dvv + dvv.conj().T
    dm1a *= .5
    dm1a[numpy.diag_indices(nocca)] += 1

    dm1b = numpy.empty((nmob,nmob), dtype=dOO.dtype)
    dm1b[:noccb,:noccb] = dOO + dOO.conj().T
    dm1b[:noccb,noccb:] = dOV + dVO.conj().T
    dm1b[noccb:,:noccb] = dm1b[:noccb,noccb:].conj().T
    dm1b[noccb:,noccb:] = dVV + dVV.conj().T
    dm1b *= .5
    dm1b[numpy.diag_indices(noccb)] += 1

    if with_frozen and not (mycc.frozen == 0 or mycc.frozen is None):
        nmoa = mycc.mo_occ[0].size
        nmob = mycc.mo_occ[1].size
        nocca = numpy.count_nonzero(mycc.mo_occ[0] > 0)
        noccb = numpy.count_nonzero(mycc.mo_occ[1] > 0)
        rdm1a = numpy.zeros((nmoa,nmoa), dtype=dm1a.dtype)
        rdm1b = numpy.zeros((nmob,nmob), dtype=dm1b.dtype)
        rdm1a[numpy.diag_indices(nocca)] = 1
        rdm1b[numpy.diag_indices(noccb)] = 1
        moidx = mycc.get_frozen_mask()
        moidxa = numpy.where(moidx[0])[0]
        moidxb = numpy.where(moidx[1])[0]
        rdm1a[moidxa[:,None],moidxa] = dm1a
        rdm1b[moidxb[:,None],moidxb] = dm1b
        dm1a = rdm1a
        dm1b = rdm1b

    if ao_repr:
        mo_a, mo_b = mycc.mo_coeff
        dm1a = lib.einsum('pi,ij,qj->pq', mo_a, dm1a, mo_a)
        dm1b = lib.einsum('pi,ij,qj->pq', mo_b, dm1b, mo_b)
    return dm1a, dm1b
Example #25
def oas_cov(pts):
    r"""
    Estimate the covariance matrix using the Oracle Approximating Shrinkage algorithm

    .. math::

        (1 - s)\Sigma + s \mu \mathcal{I}_d

    where :math:`\mu = \mathrm{tr}(\Sigma) / d`.  This ensures the covariance matrix estimate is
    well behaved for small sample sizes.

    :param pts:
        An `(N, ndim)`-shaped array, containing `N` samples from the target distribution.


    This follows the implementation in `scikit-learn
    <https://github.com/scikit-learn/scikit-learn/blob/31c5497/
    sklearn/covariance/shrunk_covariance_.py>`_.
    """
    pts = np.atleast_2d(pts)
    npts, ndim = pts.shape

    emperical_cov = np.cov(pts.T)
    mean = np.trace(emperical_cov) / ndim

    alpha = np.mean(emperical_cov * emperical_cov)
    num = alpha + mean * mean
    den = (npts + 1) * (alpha - (mean * mean) / ndim)

    shrinkage = min(num / den, 1)
    shrunk_cov = (1 - shrinkage) * emperical_cov
    shrunk_cov[np.diag_indices(ndim)] += shrinkage * mean

    return shrunk_cov
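
A minimal usage sketch, assuming oas_cov above is in scope; the samples are random stand-ins for draws from the target distribution:

import numpy as np

pts = np.random.randn(50, 3)          # 50 samples in 3 dimensions
cov = oas_cov(pts)

assert cov.shape == (3, 3)
print(np.linalg.eigvalsh(cov))        # shrinkage keeps the estimate well conditioned
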
Example #26
def is_diagonal(A):
    """Returns true iff A is a diagonal matrix."""
    A = to_square_array(A)
    u = np.diag_indices(len(A))
    check = np.zeros_like(A)
    check[u] = A[u]
    return np.allclose(A, check)
Example #27
 def decompose_tensor(self, rank, X=None, init='nvecs', lambda_A=10, lambda_R=10, *args, **kwargs):
     """Decompose adjacency tensor using RESCAL_ALS
     
     """   
     # if X not specified, used X attribute if exists
     if X is None:
         if hasattr(self, 'X'):
             X = self.X
         else:
             print("netCreate object has no X attribute. Need to Provide X argument.")
         
     # Set logging to INFO to see RESCAL information
     logging.basicConfig(level=logging.INFO)
     A, R, fit, itr, exectimes = als(X, rank=rank, init=init, lambda_A=lambda_A, lambda_R=lambda_R)
     self.rescal_params = {'rank':rank,'fit':fit,'lambda_A':lambda_A,'lambda_R':lambda_R}        
     self.A = A
     self.R = R   
     self.AAT = AATnn = np.dot(A,A.T)
     AATnn[AATnn < 0] = 0
     
     #make SIF: zeros on diags, non-negative values both upper & lower triangles
     # not row-normalized since some individuals are more likely to have more ties 
     SIF = AATnn 
     SIF[np.diag_indices(SIF.shape[0])] = 0
     self.SIF = SIF
     
     # remove upper triangle by keeping only lower triangle indexed values
     self.AATnn = np.tril(AATnn, k= -1)  # k = -1 to keep only below diagonal    
Example #28
def dirichlet_covariance(alpha):
    r"""Covariance matrix for Dirichlet distribution.

    Parameters
    ----------
    alpha : (M, ) ndarray
        Parameters of Dirichlet distribution
    
    Returns
    -------
    cov : (M, M) ndarray
        Covariance matrix
        
    """
    alpha0 = alpha.sum()
    norm = alpha0 ** 2 * (alpha0 + 1.0)

    """Non normalized covariance"""
    Z = -alpha[:, np.newaxis] * alpha[np.newaxis, :]

    """Correct diagonal"""
    ind = np.diag_indices(Z.shape[0])
    Z[ind] += alpha0 * alpha

    """Covariance matrix"""
    cov = Z / norm

    return cov
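
The diagonal of the returned matrix should agree with the textbook Dirichlet variance Var(X_i) = alpha_i (alpha_0 - alpha_i) / (alpha_0^2 (alpha_0 + 1)). A quick numerical check of that identity, assuming the function above is in scope:

import numpy as np

alpha = np.array([2.0, 3.0, 5.0])
cov = dirichlet_covariance(alpha)

alpha0 = alpha.sum()
expected_var = alpha * (alpha0 - alpha) / (alpha0 ** 2 * (alpha0 + 1.0))
assert np.allclose(np.diag(cov), expected_var)
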
Example #29
def _translation_matrix_from_vector(v):
    """
    Compute a translation matrix T for vector v. If x is a 3-vector, then

        dot(T, y)[:3] = x + v

    where y is a 4-vector with an appended one: y[0:3] = x, y[3] = 1.

    Parameters
    ----------
    v : np.ndarray
        3-vector of a translation

    Returns
    -------
    T : np.ndarray
        A 4x4 translation matrix

    Citation
    --------
    ..[1] http://en.wikipedia.org/wiki/Translation_%28geometry%29#Matrix_
          representation
    """

    T = np.zeros((4,4), dtype=np.float64)
    di = np.diag_indices(4)

    T[di]    = 1.0
    T[0:3,3] = v.copy()

    return T
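
A quick check of the property stated in the docstring, dot(T, y)[:3] = x + v, assuming the function above is in scope (the vectors are arbitrary):

import numpy as np

v = np.array([1.0, -2.0, 0.5])
x = np.array([3.0, 4.0, 5.0])
y = np.append(x, 1.0)                 # homogeneous coordinates

T = _translation_matrix_from_vector(v)
assert np.allclose(np.dot(T, y)[:3], x + v)
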
Example #30
def clean_diagonal(H):
    """
    Removes the infinities along the diagonal, which makes the 
    matrix useable but doesn't change anything important
    """
    H[np.diag_indices(H.shape[0])] = 0
    return H
Example #31
 def convert_to_matrix(x):
     G = np.zeros((n, n, k))
     G[np.triu_indices(n)] = x[-n*(n+1)//2:, :]
     G = G + np.transpose(G, [1, 0, 2]) 
     G[np.diag_indices(n)] = G[np.diag_indices(n)]/2
     return G
Example #32
def quasiOT_sample():
    p = int(input("Number of levels ?\n"))
    r = int(input("Number of sampled random trajectories ?\n"))
    M = int(input("Number of generated random trajectories ?\n"))
    # p= 4
    # r= 50
    # M= 200
    datafile = easygui.fileopenbox(msg="Choose the input file (*.csv)",
                                   title="Open File",
                                   default="*.csv")

    while datafile is None:
        easygui.msgbox("No file is selected, please try again !")
        datafile = easygui.fileopenbox(msg="Choose the input file (*.csv)",
                                       title="Open File",
                                       default="*.csv")

    data = pd.read_csv(datafile, sep=',')

    datamatrix = data[['Min', 'Max']].values
    ColName = data['Parameter'].values
    k = len(ColName)
    datamatrix_diff = datamatrix[:, 1] - datamatrix[:, 0]
    B = np.append(np.zeros([1, k]), np.tril(np.ones([k, k])), axis=0)
    delta = p / (2 * (p - 1))
    J_1 = np.ones([k + 1, k])
    J_k = np.ones([k + 1, k])
    T = [0 for i in range(M)]
    i = 1
    A = np.eye(k)

    while i <= M:
        x_Star = np.random.randint(0, p - 1, [1, k]) / (p - 1)
        d = np.ones([1, k])
        d[np.random.rand(1, k) <= 0.5] = -1
        D_Star = np.eye(k)
        D_Star[np.diag_indices(k)] = d
        P_Star = A[np.random.permutation(k), :]
        B_Star = (J_1 * x_Star + (delta / 2) *
                  ((2 * B - J_k).dot(D_Star) + J_k)).dot(P_Star)

        if k > 10:
            for cp in np.nditer(np.arange(p / 2)):
                B_Star[B_Star == (cp - 0.5 * p) /
                       (p - 1)] = (cp + 0.5 * p) / (p - 1)
            for cp in np.nditer(np.arange(p / 2) + p / 2):
                B_Star[B_Star == (cp + 0.5 * p) /
                       (p - 1)] = (cp - 0.5 * p) / (p - 1)

        if np.min(B_Star) >= 0 and np.max(B_Star) <= 1:
            T[i - 1] = B_Star
            i = i + 1

    DIST = np.zeros([M, M])
    for i in range(2, M + 1):
        for j in range(1, i):
            DIST[i - 1,
                 j - 1] = np.sum(sdis.cdist(T[i - 1], T[j - 1], 'euclidean'))
            DIST[j - 1, i - 1] = DIST[i - 1, j - 1]

    vector = np.arange(1, M + 1)
    CombDist = np.zeros([M, 1])
    CombMatrix0 = nchoosek(vector, M - 1)

    for i in range(M, r, -1):
        if i == M:
            r_comb_matrix = nchoosek(vector, 2)
            CombDist_total = 0.
            for index in range(np.size(r_comb_matrix, 0)):
                CombDist_total = CombDist_total + (
                    DIST[r_comb_matrix[index, 1] - 1,
                         r_comb_matrix[index, 0] - 1])**2
            discard_element = np.arange(M, 0, -1)
            for j in range(i):
                CombDist[j] = CombDist_total - np.sum(
                    DIST[CombMatrix0[j, :] - 1, discard_element[j] - 1]**2)
        else:
            for j in range(i):
                CombDist[j] = CombDist[j] - np.sum(
                    DIST[CombMatrix0[j, :] - 1, discard_element - 1]**2)

        index = np.argmax(CombDist)
        old_vector = vector[:]
        vector = CombMatrix0[index, :]
        discard_element = np.setdiff1d(old_vector, vector)
        CombDist = np.delete(CombDist, index)
        CombMatrix0 = np.delete(CombMatrix0, index, 0)
        CombMatrix0 = CombMatrix0[CombMatrix0 != discard_element]
        CombMatrix0 = np.reshape(CombMatrix0, (i - 1, i - 2))

    best_comb = np.sort(vector)
    TrajectorySet = [T[i] for i in best_comb - 1]
    ParameterSet = TrajectorySet[:]

    datamatrix_diff_transver = np.matlib.repmat(datamatrix_diff, k + 1, 1)
    datamatrix_transver = np.matlib.repmat(np.transpose(datamatrix[:, 0]),
                                           k + 1, 1)

    for i in range(r):
        trajectory = TrajectorySet[i]
        ParameterSet[
            i] = datamatrix_transver + trajectory * datamatrix_diff_transver

    t = pd.DataFrame([], columns=ColName)

    for i in ParameterSet:
        t = t.append(pd.DataFrame(i, columns=ColName))

    t.to_csv(os.path.join(os.path.dirname(datafile), "quasiOT_sample.csv"),
             index=False,
             float_format='%.5f')
Example #33
    for (l_x, l_y, L_x, L_y, z_range, z_bearing, Z_range, Z_bearing) in zip(detected_x, detected_y, real_x, real_y, z_range_array, z_bearing_array, Z_range_array, Z_bearing_array):   
        q = (L_x - x_t)**2 + (L_y - y_t)**2
        if H_t.size == 0:
            H_t = np.matrix([[-(L_x-x_t)/np.sqrt(q),-(L_y-y_t)/np.sqrt(q),0],[(L_y-y_t)/q,-(L_x-x_t)/q,-1]])
            Q_t_array = np.array([sigma_l_d**2,sigma_l_theta**2])
            zsensor_t = np.matrix([[z_range],[z_bearing]])
            zreal_t = np.matrix([[Z_range],[Z_bearing]])
        
        else: 
            H_t = np.vstack((H_t, np.matrix([[-(L_x-x_t)/np.sqrt(q),-(L_y-y_t)/np.sqrt(q),0],[(L_y-y_t)/q,-(L_x-x_t)/q,-1]])))
            Q_t_array = np.hstack((Q_t_array,np.array([sigma_l_d**2,sigma_l_theta**2])))
            zsensor_t = np.vstack((zsensor_t,np.matrix([[z_range],[z_bearing]])))
            zreal_t = np.vstack((zreal_t,np.matrix([[Z_range],[Z_bearing]])))

    Q_t = np.eye(Q_t_array.size)
    row, col = np.diag_indices(Q_t_array.shape[0])
    Q_t[row,col] = Q_t_array

    K_t = np.dot(np.dot(sigma_t_,H_t.T),np.linalg.inv(np.dot(np.dot(H_t,sigma_t_),H_t.T)+Q_t))
    INOVA = zsensor_t - zreal_t

    DeltaP = np.dot(K_t,INOVA)

    PoseR = getPose()

    DeltaP = {
    "th": DeltaP[2].item(),
    "x": DeltaP[0].item(),
    "y": DeltaP[1].item()
    }
    
Example #34
def _fit_newton(f,
                score,
                start_params,
                fargs,
                kwargs,
                disp=True,
                maxiter=100,
                callback=None,
                retall=False,
                full_output=True,
                hess=None,
                ridge_factor=1e-10):
    tol = kwargs.setdefault('tol', 1e-8)
    iterations = 0
    oldparams = np.inf
    newparams = np.asarray(start_params)
    if retall:
        history = [oldparams, newparams]
    while (iterations < maxiter
           and np.any(np.abs(newparams - oldparams) > tol)):
        H = np.asarray(hess(newparams))
        # regularize Hessian, not clear what ridge factor should be
        # keyword option with absolute default 1e-10, see #1847
        if not np.all(ridge_factor == 0):
            H[np.diag_indices(H.shape[0])] += ridge_factor
        oldparams = newparams
        newparams = oldparams - np.dot(np.linalg.inv(H), score(oldparams))
        if retall:
            history.append(newparams)
        if callback is not None:
            callback(newparams)
        iterations += 1
    fval = f(newparams, *fargs)  # this is the negative likelihood
    if iterations == maxiter:
        warnflag = 1
        if disp:
            print("Warning: Maximum number of iterations has been "
                  "exceeded.")
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % iterations)
    else:
        warnflag = 0
        if disp:
            print("Optimization terminated successfully.")
            print("         Current function value: %f" % fval)
            print("         Iterations %d" % iterations)
    if full_output:
        (xopt, fopt, niter, gopt, hopt) = (newparams, f(newparams,
                                                        *fargs), iterations,
                                           score(newparams), hess(newparams))
        converged = not warnflag
        retvals = {
            'fopt': fopt,
            'iterations': niter,
            'score': gopt,
            'Hessian': hopt,
            'warnflag': warnflag,
            'converged': converged
        }
        if retall:
            retvals.update({'allvecs': history})

    else:
        retvals = newparams
        xopt = None

    return xopt, retvals
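
The core of the loop above is a ridge-regularized Newton step: ridge_factor is added to the diagonal of the Hessian before it is inverted, which keeps the step well defined when the Hessian is singular or nearly so. A stripped-down sketch of that update on a toy quadratic objective (the objective, gradient, and Hessian here are made up for illustration):

import numpy as np

A = np.array([[2.0, 1.0],
              [1.0, 0.5]])            # singular Hessian (rank 1)
b = np.array([1.0, 0.5])              # gradient of 0.5*x'Ax - b'x is Ax - b

def score(x):
    return A.dot(x) - b

def hess(x):
    return A.copy()

ridge_factor = 1e-10
x = np.zeros(2)
for _ in range(20):
    H = hess(x)
    H[np.diag_indices(H.shape[0])] += ridge_factor   # regularize as in _fit_newton
    x = x - np.linalg.solve(H, score(x))

assert np.allclose(A.dot(x), b)       # a solution is found despite the singular Hessian
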
Example #35
            test = test[1] #test part of the test set
            
            print "Train."        
            clf.fit(X_train[train], y_train[train])

            print "Predict."
            y_pred_proba = clf.predict_proba(X[test])
            predicted_probability[test] = y_pred_proba.copy()                    
            y_pred[test] = np.array([best_decision(prob_configuration, score_matrix=score_matrix)[0] for prob_configuration in y_pred_proba])

        y_pred_conf = np.zeros((nTrial, nCh, nCh))
        for i_trial in range(nTrial):
            for i_comb in range(order_combinations.shape[0]):
                y_pred_conf[i_trial][order_combinations[i_comb][:,None], order_combinations[i_comb]] += class_to_configuration(y_pred[i_trial*nComb+i_comb])
        
    
    print "Set zero the diagonal"
    for i_trial in range(nTrial):
        y_pred_conf[i_trial][np.diag_indices(nCh)] = 0
        y_level2_conf[i_trial][np.diag_indices(nCh)] = 0
    
    pwd_dest = 'data/'     
    filename_save = '%ssimulated_Ldataset_tws%d_r2_mse_granger_binary_class_rowNorm_fEng_cv.pickle' % (pwd_dest, time_window_size_test)
    print "Saving %s" % filename_save
    pickle.dump({'y_test_pred': y_pred_conf,
                 'y_test_true': y_level2_conf,
                 'predicted_probability': predicted_probability,
                 'score_matrix': score_matrix,                     
                 },
                open(filename_save, 'wb'),
                protocol = pickle.HIGHEST_PROTOCOL)
Example #36
    def call(self, x, mask=None):
        # TODO: validate input shape

        # The input of this layer is [L, mu, a] in concatenated form. We first split
        # those up.
        idx = 0
        if self.mode == 'full':
            L_flat = x[:, idx:idx + (self.nb_actions * self.nb_actions + self.nb_actions) // 2]
            idx += (self.nb_actions * self.nb_actions + self.nb_actions) // 2
        elif self.mode == 'diag':
            L_flat = x[:, idx:idx + self.nb_actions]
            idx += self.nb_actions
        else:
            L_flat = None
        assert L_flat is not None
        mu = x[:, idx:idx + self.nb_actions]
        idx += self.nb_actions
        a = x[:, idx:idx + self.nb_actions]
        idx += self.nb_actions

        if self.mode == 'full':
            # Create L and L^T matrix, which we use to construct the positive-definite matrix P.
            L = None
            LT = None
            if K._BACKEND == 'theano':
                import theano.tensor as T
                import theano

                def fn(x, L_acc, LT_acc):
                    x_ = K.zeros((self.nb_actions, self.nb_actions))
                    x_ = T.set_subtensor(x_[np.tril_indices(self.nb_actions)], x)
                    diag = K.exp(T.diag(x_))
                    x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], diag)
                    return x_, x_.T

                outputs_info = [
                    K.zeros((self.nb_actions, self.nb_actions)),
                    K.zeros((self.nb_actions, self.nb_actions)),
                ]
                results, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
                L, LT = results
            elif K._BACKEND == 'tensorflow':
                import tensorflow as tf

                # Number of elements in a triangular matrix.
                nb_elems = (self.nb_actions * self.nb_actions + self.nb_actions) // 2

                # Create mask for the diagonal elements in L_flat. This is used to exponentiate
                # only the diagonal elements, which is done before gathering.
                diag_indeces = [0]
                for row in range(1, self.nb_actions):
                    diag_indeces.append(diag_indeces[-1] + (row + 1))
                diag_mask = np.zeros(1 + nb_elems)  # +1 for the leading zero
                diag_mask[np.array(diag_indeces) + 1] = 1
                diag_mask = K.variable(diag_mask)

                # Add leading zero element to each element in the L_flat. We use this zero
                # element when gathering L_flat into a lower triangular matrix L.
                nb_rows = tf.shape(L_flat)[0]
                zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
                L_flat = tf.concat(1, [zeros, L_flat])
                
                # Create mask that can be used to gather elements from L_flat and put them
                # into a lower triangular matrix.
                tril_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
                tril_mask[np.tril_indices(self.nb_actions)] = range(1, nb_elems + 1)
                
                # Finally, process each element of the batch.
                init = [
                    K.zeros((self.nb_actions, self.nb_actions)),
                    K.zeros((self.nb_actions, self.nb_actions)),
                ]
                
                def fn(a, x):
                    # Exponentiate everything. This is much easier than only exponentiating
                    # the diagonal elements, and, usually, the action space is relatively low.
                    x_ = K.exp(x)
                    # Only keep the diagonal elements.
                    x_ *= diag_mask
                    # Add the original, non-diagonal elements.
                    x_ += x * (1. - diag_mask)
                    # Finally, gather everything into a lower triangular matrix.
                    L_ = tf.gather(x_, tril_mask)
                    return [L_, tf.transpose(L_)]

                tmp = tf.scan(fn, L_flat, initializer=init)
                if isinstance(tmp, (list, tuple)):
                    # TensorFlow 0.10 now returns a tuple of tensors.
                    L, LT = tmp
                else:
                    # Old TensorFlow < 0.10 returns a shared tensor.
                    L = tmp[:, 0, :, :]
                    LT = tmp[:, 1, :, :]
            else:
                raise RuntimeError('Unknown Keras backend "{}".'.format(K._BACKEND))
            assert L is not None
            assert LT is not None
            P = K.batch_dot(L, LT)
        elif self.mode == 'diag':
            if K._BACKEND == 'theano':
                import theano.tensor as T
                import theano

                def fn(x, P_acc):
                    x_ = K.zeros((self.nb_actions, self.nb_actions))
                    x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], x)
                    return x_

                outputs_info = [
                    K.zeros((self.nb_actions, self.nb_actions)),
                ]
                P, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
                print(P)
            elif K._BACKEND == 'tensorflow':
                import tensorflow as tf

                # Create mask that can be used to gather elements from L_flat and put them
                # into a diagonal matrix.
                diag_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
                diag_mask[np.diag_indices(self.nb_actions)] = range(1, self.nb_actions + 1)

                # Add leading zero element to each element in the L_flat. We use this zero
                # element when gathering L_flat into a lower triangular matrix L.
                nb_rows = tf.shape(L_flat)[0]
                zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
                L_flat = tf.concat(1, [zeros, L_flat])

                # Finally, process each element of the batch.
                def fn(a, x):
                    x_ = tf.gather(x, diag_mask)
                    return x_

                P = tf.scan(fn, L_flat, initializer=K.zeros((self.nb_actions, self.nb_actions)))
            else:
                raise RuntimeError('Unknown Keras backend "{}".'.format(K._BACKEND))
        assert P is not None
        assert K.ndim(P) == 3

        # Combine a, mu and P into a scalar (over the batches). What we compute here is
        # -.5 * (a - mu)^T * P * (a - mu), where * denotes the dot-product. Unfortunately
        # TensorFlow handles vector * P slightly suboptimal, hence we convert the vectors to
        # 1xd/dx1 matrices and finally flatten the resulting 1x1 matrix into a scalar. All
        # operations happen over the batch size, which is dimension 0.
        prod = K.batch_dot(K.expand_dims(a - mu, dim=1), P)
        prod = K.batch_dot(prod, K.expand_dims(a - mu, dim=-1))
        A = -.5 * K.batch_flatten(prod)
        assert K.ndim(A) == 2
        return A
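
The TensorFlow branch above builds L by "gathering" the flat vector into a lower-triangular matrix through an integer mask, using a dummy leading zero so that index 0 fills the strictly upper triangle. The same trick in plain numpy (n and the flat values are arbitrary):

import numpy as np

n = 3
nb_elems = (n * n + n) // 2
l_flat = np.arange(1.0, nb_elems + 1)             # values for the lower triangle

tril_mask = np.zeros((n, n), dtype='int32')
tril_mask[np.tril_indices(n)] = range(1, nb_elems + 1)

padded = np.concatenate([[0.0], l_flat])          # leading zero fills the upper triangle
L = padded[tril_mask]                             # gather into a lower-triangular matrix
L[np.diag_indices(n)] = np.exp(L[np.diag_indices(n)])   # exponentiate the diagonal
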
Example #37
def nearestSPD(A):
    """
    Nearest Symmetric Positive Definite matrix to A.

    The Frobenius norm is used: the rms difference of the elements.

    Parameters
    ----------
    A : ndarray, 2-D
        Matrix that should be SPD, but might not be, perhaps because
        of floating point arithmetic, or limitations in the data
        available to estimate its elements.

    Returns
    -------
    Ahat : ndarray, 2-D
        (Almost) nearest positive definite matrix to A

    Notes
    -----
    From Higham: "The nearest symmetric positive semidefinite matrix in the
    Frobenius norm to an arbitrary real matrix A is shown to be (B + H)/2,
    where H is the symmetric polar factor of B=(A + A')/2."
    http://www.sciencedirect.com/science/article/pii/0024379588902236

    Code and docstring are based on the Matlab m-file by John D'Errico:
    http://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd

    The tweaking method was changed to be more robust in the common
    pathological case, where more than one eigenvalue is near zero, and
    when one eigenvalue is exactly zero.  To be conservative, we are
    always nudging the diagonal to larger values.
    """

    # Ensure symmetry:
    B = (A + A.T) / 2

    # Symmetric polar factor, H: (in numpy svd, B = U S V, not U S V').
    U, S, V = np.linalg.svd(B)
    H = np.dot(V.T * S, V)

    Ahat = (B + H) / 2

    Ahat = (Ahat + Ahat.T) / 2

    # At this point, given floating point errors and differences
    # among algorithms, Ahat might be on the PD boundary, or too
    # close to it for some numerical operations. Adjust it:

    n = A.shape[0]
    k = 0
    # The "k == 0" is included to avoid a warning from
    # np.random.multivariate_normal, which seems to have a PD-detection
    # algorithm that occasionally fails on matrices that pass the
    # cholesky test.
    while k == 0 or not _is_PD(Ahat):
        k += 1
        # Tweaking strategy differs from D'Errico version.  It
        # is still a very small adjustment, but much larger than
        # his.
        # Eigvals are or can be complex dtype, so take abs().
        maxeig = np.abs(np.linalg.eigvals(Ahat)).max()
        Ahat[np.diag_indices(n)] += np.spacing(maxeig)
        # Normally no more than one adjustment will be needed.
        if k > 100:
            warnings.warn("adjustment in nearestSPD did not converge; "
                          "returning diagonal")
            return np.diag(np.diag(A))
    return Ahat
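A minimal usage sketch, assuming nearestSPD and its warnings import are in scope; _is_PD is an assumed helper implementing the Cholesky-based check the docstring refers to:

import numpy as np

def _is_PD(M):
    # Assumed helper: positive definiteness via a Cholesky attempt.
    try:
        np.linalg.cholesky(M)
        return True
    except np.linalg.LinAlgError:
        return False

# A symmetric matrix with one negative eigenvalue, e.g. a badly estimated correlation matrix.
C = np.array([[1.00, 0.95, 0.10],
              [0.95, 1.00, 0.95],
              [0.10, 0.95, 1.00]])
print(_is_PD(C))                   # False
C_spd = nearestSPD(C)
print(_is_PD(C_spd))               # True
print(np.linalg.norm(C - C_spd))   # the Frobenius-norm adjustment is small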
Beispiel #38
0
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
                             node_angles=None, node_width=None,
                             node_colors=None, facecolor='black',
                             textcolor='white', node_edgecolor='black',
                             linewidth=1.5, colormap='hot', vmin=None,
                             vmax=None, colorbar=True, title=None,
                             colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
                             fontsize_title=12, fontsize_names=8,
                             fontsize_colorbar=8, padding=6.,
                             fig=None, subplot=111, interactive=True,
                             node_linewidth=2.):
    """Visualize connectivity as a circular graph.

    Note: This code is based on the circle graph example by Nicolas P. Rougier
    http://www.labri.fr/perso/nrougier/coding/.

    Parameters
    ----------
    con : array
        Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
        array is provided, "indices" has to be used to define the connection
        indices.
    node_names : list of str
        Node names. The order corresponds to the order in con.
    indices : tuple of arrays | None
        Two arrays with indices of connections for which the connections
        strengths are defined in con. Only needed if con is a 1D array.
    n_lines : int | None
        If not None, only the n_lines strongest connections (strength=abs(con))
        are drawn.
    node_angles : array, shape=(len(node_names),) | None
        Array with node positions in degrees. If None, the nodes are equally
        spaced on the circle. See mne.viz.circular_layout.
    node_width : float | None
        Width of each node in degrees. If None, the minimum angle between any
        two nodes is used as the width.
    node_colors : list of tuples | list of str
        List with the color to use for each node. If fewer colors than nodes
        are provided, the colors will be repeated. Any color supported by
        matplotlib can be used, e.g., RGBA tuples, named colors.
    facecolor : str
        Color to use for background. See matplotlib.colors.
    textcolor : str
        Color to use for text. See matplotlib.colors.
    node_edgecolor : str
        Color to use for lines around nodes. See matplotlib.colors.
    linewidth : float
        Line width to use for connections.
    colormap : str
        Colormap to use for coloring the connections.
    vmin : float | None
        Minimum value for colormap. If None, it is determined automatically.
    vmax : float | None
        Maximum value for colormap. If None, it is determined automatically.
    colorbar : bool
        Display a colorbar or not.
    title : str
        The figure title.
    colorbar_size : float
        Size of the colorbar.
    colorbar_pos : 2-tuple
        Position of the colorbar.
    fontsize_title : int
        Font size to use for title.
    fontsize_names : int
        Font size to use for node names.
    fontsize_colorbar : int
        Font size to use for colorbar.
    padding : float
        Space to add around figure to accommodate long labels.
    fig : None | instance of matplotlib.pyplot.Figure
        The figure to use. If None, a new figure with the specified background
        color will be created.
    subplot : int | 3-tuple
        Location of the subplot when creating figures with multiple plots. E.g.
        121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
        matplotlib.pyplot.subplot.
    interactive : bool
        When enabled, left-click on a node to show only connections to that
        node. Right-click shows all connections.
    node_linewidth : float
        Line width for nodes.

    Returns
    -------
    fig : instance of matplotlib.pyplot.Figure
        The figure handle.
    axes : instance of matplotlib.axes.PolarAxesSubplot
        The subplot handle.
    """
    import matplotlib.pyplot as plt
    import matplotlib.path as m_path
    import matplotlib.patches as m_patches

    n_nodes = len(node_names)

    if node_angles is not None:
        if len(node_angles) != n_nodes:
            raise ValueError('node_angles has to be the same length '
                             'as node_names')
        # convert it to radians
        node_angles = node_angles * np.pi / 180
    else:
        # uniform layout on unit circle
        node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)

    if node_width is None:
        # widths correspond to the minimum angle between two nodes
        dist_mat = node_angles[None, :] - node_angles[:, None]
        dist_mat[np.diag_indices(n_nodes)] = 1e9
        node_width = np.min(np.abs(dist_mat))
    else:
        node_width = node_width * np.pi / 180

    if node_colors is not None:
        if len(node_colors) < n_nodes:
            node_colors = cycle(node_colors)
    else:
        # assign colors using colormap
        node_colors = [plt.cm.spectral(i / float(n_nodes))
                       for i in range(n_nodes)]

    # handle 1D and 2D connectivity information
    if con.ndim == 1:
        if indices is None:
            raise ValueError('indices has to be provided if con.ndim == 1')
    elif con.ndim == 2:
        if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
            raise ValueError('con has to be 1D or a square matrix')
        # we use the lower-triangular part
        indices = tril_indices(n_nodes, -1)
        con = con[indices]
    else:
        raise ValueError('con has to be 1D or a square matrix')

    # get the colormap
    if isinstance(colormap, string_types):
        colormap = plt.get_cmap(colormap)

    # Make figure background the same colors as axes
    if fig is None:
        fig = plt.figure(figsize=(8, 8), facecolor=facecolor)

    # Use a polar axes
    if not isinstance(subplot, tuple):
        subplot = (subplot,)
    axes = plt.subplot(*subplot, polar=True, axisbg=facecolor)

    # No ticks, we'll put our own
    plt.xticks([])
    plt.yticks([])

    # Set the y-axis limit, add additional space if requested
    plt.ylim(0, 10 + padding)

    # Remove the black axes border which may obscure the labels
    axes.spines['polar'].set_visible(False)

    # Draw lines between connected nodes, only draw the strongest connections
    if n_lines is not None and len(con) > n_lines:
        con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
    else:
        con_thresh = 0.

    # get the connections which we are drawing and sort by connection strength
    # this will allow us to draw the strongest connections first
    con_abs = np.abs(con)
    con_draw_idx = np.where(con_abs >= con_thresh)[0]

    con = con[con_draw_idx]
    con_abs = con_abs[con_draw_idx]
    indices = [ind[con_draw_idx] for ind in indices]

    # now sort them
    sort_idx = np.argsort(con_abs)
    con_abs = con_abs[sort_idx]
    con = con[sort_idx]
    indices = [ind[sort_idx] for ind in indices]

    # Get vmin vmax for color scaling
    if vmin is None:
        vmin = np.min(con[np.abs(con) >= con_thresh])
    if vmax is None:
        vmax = np.max(con)
    vrange = vmax - vmin

    # We want to add some "noise" to the start and end position of the
    # edges: We modulate the noise with the number of connections of the
    # node and the connection strength, such that the strongest connections
    # are closer to the node center
    nodes_n_con = np.zeros((n_nodes), dtype=int)
    for i, j in zip(indices[0], indices[1]):
        nodes_n_con[i] += 1
        nodes_n_con[j] += 1

    # initialize random number generator so the plot is reproducible
    rng = np.random.mtrand.RandomState(seed=0)

    n_con = len(indices[0])
    noise_max = 0.25 * node_width
    start_noise = rng.uniform(-noise_max, noise_max, n_con)
    end_noise = rng.uniform(-noise_max, noise_max, n_con)

    nodes_n_con_seen = np.zeros_like(nodes_n_con)
    for i, (start, end) in enumerate(zip(indices[0], indices[1])):
        nodes_n_con_seen[start] += 1
        nodes_n_con_seen[end] += 1

        start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
                           float(nodes_n_con[start]))
        end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
                         float(nodes_n_con[end]))

    # scale connectivity for colormap (vmin<=>0, vmax<=>1)
    con_val_scaled = (con - vmin) / vrange

    # Finally, we draw the connections
    for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
        # Start point
        t0, r0 = node_angles[i], 10

        # End point
        t1, r1 = node_angles[j], 10

        # Some noise in start and end point
        t0 += start_noise[pos]
        t1 += end_noise[pos]

        verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
        codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
                 m_path.Path.LINETO]
        path = m_path.Path(verts, codes)

        color = colormap(con_val_scaled[pos])

        # Actual line
        patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
                                    linewidth=linewidth, alpha=1.)
        axes.add_patch(patch)

    # Draw ring with colored nodes
    height = np.ones(n_nodes) * 1.0
    bars = axes.bar(node_angles, height, width=node_width, bottom=9,
                    edgecolor=node_edgecolor, lw=node_linewidth,
                    facecolor='.9', align='center')

    for bar, color in zip(bars, node_colors):
        bar.set_facecolor(color)

    # Draw node labels
    angles_deg = 180 * node_angles / np.pi
    for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
        if angle_deg >= 270:
            ha = 'left'
        else:
            # Flip the label, so text is always upright
            angle_deg += 180
            ha = 'right'

        axes.text(angle_rad, 10.4, name, size=fontsize_names,
                  rotation=angle_deg, rotation_mode='anchor',
                  horizontalalignment=ha, verticalalignment='center',
                  color=textcolor)

    if title is not None:
        plt.title(title, color=textcolor, fontsize=fontsize_title,
                  axes=axes)

    if colorbar:
        norm = normalize_colors(vmin=vmin, vmax=vmax)
        sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm)
        sm.set_array(np.linspace(vmin, vmax))
        cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
                          shrink=colorbar_size,
                          anchor=colorbar_pos)
        cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
        cb.ax.tick_params(labelsize=fontsize_colorbar)
        plt.setp(cb_yticks, color=textcolor)

    # Add callback for interaction
    if interactive:
        callback = partial(_plot_connectivity_circle_onpick, fig=fig,
                           axes=axes, indices=indices, n_nodes=n_nodes,
                           node_angles=node_angles)

        fig.canvas.mpl_connect('button_press_event', callback)

    return fig, axes
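A minimal usage sketch, assuming the function above and its module-level helpers (tril_indices, string_types, normalize_colors, etc.) are importable as in the original mne.viz module; the toy connectivity matrix is purely hypothetical:

import numpy as np

rng = np.random.RandomState(42)
n = 6
con = rng.rand(n, n)
con = (con + con.T) / 2.0                     # square, symmetric connectivity
names = ['node{}'.format(i) for i in range(n)]

fig, axes = plot_connectivity_circle(con, names, n_lines=10,
                                     colorbar=False, interactive=False,
                                     title='toy connectivity')
fig.savefig('circle.png', facecolor='black')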
Beispiel #39
0
def make_rdm2(mp, t2=None):
    r'''
    Two-particle spin density matrices dm2aa, dm2ab, dm2bb in MO basis

    dm2aa[p,q,r,s] = <q_alpha^\dagger s_alpha^\dagger r_alpha p_alpha>
    dm2ab[p,q,r,s] = <q_alpha^\dagger s_beta^\dagger r_beta p_alpha>
    dm2bb[p,q,r,s] = <q_beta^\dagger s_beta^\dagger r_beta p_beta>

    (p,q correspond to one particle and r,s correspond to another particle)
    Two-particle density matrix should be contracted to integrals with the
    pattern below to compute energy

    E = numpy.einsum('pqrs,pqrs', eri_aa, dm2_aa)
    E+= numpy.einsum('pqrs,pqrs', eri_ab, dm2_ab)
    E+= numpy.einsum('pqrs,rspq', eri_ba, dm2_ab)
    E+= numpy.einsum('pqrs,pqrs', eri_bb, dm2_bb)

    where eri_aa[p,q,r,s] = (p_alpha q_alpha | r_alpha s_alpha )
    eri_ab[p,q,r,s] = ( p_alpha q_alpha | r_beta s_beta )
    eri_ba[p,q,r,s] = ( p_beta q_beta | r_alpha s_alpha )
    eri_bb[p,q,r,s] = ( p_beta q_beta | r_beta s_beta )
    '''
    if t2 is None: t2 = mp.t2
    nmoa, nmob = nmoa0, nmob0 = mp.nmo
    nocca, noccb = nocca0, noccb0 = mp.nocc
    t2aa, t2ab, t2bb = t2

    if not (mp.frozen == 0 or mp.frozen is None):
        nmoa0 = mp.mo_occ[0].size
        nmob0 = mp.mo_occ[1].size
        nocca0 = numpy.count_nonzero(mp.mo_occ[0] > 0)
        noccb0 = numpy.count_nonzero(mp.mo_occ[1] > 0)
        moidxa, moidxb = mp.get_frozen_mask()
        oidxa = numpy.where(moidxa & (mp.mo_occ[0] > 0))[0]
        vidxa = numpy.where(moidxa & (mp.mo_occ[0] == 0))[0]
        oidxb = numpy.where(moidxb & (mp.mo_occ[1] > 0))[0]
        vidxb = numpy.where(moidxb & (mp.mo_occ[1] == 0))[0]

        dm2aa = numpy.zeros((nmoa0, nmoa0, nmoa0, nmoa0), dtype=t2aa.dtype)
        dm2ab = numpy.zeros((nmoa0, nmoa0, nmob0, nmob0), dtype=t2aa.dtype)
        dm2bb = numpy.zeros((nmob0, nmob0, nmob0, nmob0), dtype=t2aa.dtype)

        tmp = t2aa.transpose(0, 2, 1, 3)
        dm2aa[oidxa[:, None, None, None], vidxa[:, None, None], oidxa[:, None],
              vidxa] = tmp
        dm2aa[vidxa[:, None, None, None], oidxa[:, None, None], vidxa[:, None],
              oidxa] = tmp.conj().transpose(1, 0, 3, 2)

        tmp = t2bb.transpose(0, 2, 1, 3)
        dm2bb[oidxb[:, None, None, None], vidxb[:, None, None], oidxb[:, None],
              vidxb] = tmp
        dm2bb[vidxb[:, None, None, None], oidxb[:, None, None], vidxb[:, None],
              oidxb] = tmp.conj().transpose(1, 0, 3, 2)

        dm2ab[oidxa[:, None, None, None], vidxa[:, None, None], oidxb[:, None],
              vidxb] = t2ab.transpose(0, 2, 1, 3)
        dm2ab[vidxa[:, None, None, None], oidxa[:, None, None], vidxb[:, None],
              oidxb] = t2ab.conj().transpose(2, 0, 3, 1)

    else:
        dm2aa = numpy.zeros((nmoa0, nmoa0, nmoa0, nmoa0), dtype=t2aa.dtype)
        dm2ab = numpy.zeros((nmoa0, nmoa0, nmob0, nmob0), dtype=t2aa.dtype)
        dm2bb = numpy.zeros((nmob0, nmob0, nmob0, nmob0), dtype=t2aa.dtype)

        #:tmp = (t2aa.transpose(0,2,1,3) - t2aa.transpose(0,3,1,2)) * .5
        #: t2aa.transpose(0,2,1,3) == -t2aa.transpose(0,3,1,2)
        tmp = t2aa.transpose(0, 2, 1, 3)
        dm2aa[:nocca0, nocca0:, :nocca0, nocca0:] = tmp
        dm2aa[nocca0:, :nocca0,
              nocca0:, :nocca0] = tmp.conj().transpose(1, 0, 3, 2)

        tmp = t2bb.transpose(0, 2, 1, 3)
        dm2bb[:noccb0, noccb0:, :noccb0, noccb0:] = tmp
        dm2bb[noccb0:, :noccb0,
              noccb0:, :noccb0] = tmp.conj().transpose(1, 0, 3, 2)

        dm2ab[:nocca0, nocca0:, :noccb0, noccb0:] = t2ab.transpose(0, 2, 1, 3)
        dm2ab[nocca0:, :nocca0, noccb0:, :noccb0] = t2ab.transpose(2, 0, 3,
                                                                   1).conj()

    dm1a, dm1b = make_rdm1(mp, t2)
    dm1a[numpy.diag_indices(nocca0)] -= 1
    dm1b[numpy.diag_indices(noccb0)] -= 1

    for i in range(nocca0):
        dm2aa[i, i, :, :] += dm1a.T
        dm2aa[:, :, i, i] += dm1a.T
        dm2aa[:, i, i, :] -= dm1a.T
        dm2aa[i, :, :, i] -= dm1a
        dm2ab[i, i, :, :] += dm1b.T
    for i in range(noccb0):
        dm2bb[i, i, :, :] += dm1b.T
        dm2bb[:, :, i, i] += dm1b.T
        dm2bb[:, i, i, :] -= dm1b.T
        dm2bb[i, :, :, i] -= dm1b
        dm2ab[:, :, i, i] += dm1a.T

    for i in range(nocca0):
        for j in range(nocca0):
            dm2aa[i, i, j, j] += 1
            dm2aa[i, j, j, i] -= 1
    for i in range(noccb0):
        for j in range(noccb0):
            dm2bb[i, i, j, j] += 1
            dm2bb[i, j, j, i] -= 1
    for i in range(nocca0):
        for j in range(noccb0):
            dm2ab[i, i, j, j] += 1

    return dm2aa, dm2ab, dm2bb
Beispiel #40
0
    def __init__(self, endog, exog=None, order=(1, 0), trend='c',
                 error_cov_type='unstructured', measurement_error=False,
                 enforce_stationarity=True, enforce_invertibility=True,
                 trend_offset=1, **kwargs):

        # Model parameters
        self.error_cov_type = error_cov_type
        self.measurement_error = measurement_error
        self.enforce_stationarity = enforce_stationarity
        self.enforce_invertibility = enforce_invertibility

        # Save the given orders
        self.order = order

        # Model orders
        self.k_ar = int(order[0])
        self.k_ma = int(order[1])

        # Check for valid model
        if error_cov_type not in ['diagonal', 'unstructured']:
            raise ValueError('Invalid error covariance matrix type'
                             ' specification.')
        if self.k_ar == 0 and self.k_ma == 0:
            raise ValueError('Invalid VARMAX(p,q) specification; at least one'
                             ' p,q must be greater than zero.')

        # Warn for VARMA model
        if self.k_ar > 0 and self.k_ma > 0:
            warn('Estimation of VARMA(p,q) models is not generically robust,'
                 ' due especially to identification issues.',
                 EstimationWarning)

        # Trend
        self.trend = trend
        self.trend_offset = trend_offset
        self.polynomial_trend, self.k_trend = prepare_trend_spec(self.trend)
        self._trend_is_const = (self.polynomial_trend.size == 1 and
                                self.polynomial_trend[0] == 1)

        # Exogenous data
        (self.k_exog, exog) = prepare_exog(exog)

        # Note: at some point in the future might add state regression, as in
        # SARIMAX.
        self.mle_regression = self.k_exog > 0

        # We need to have an array or pandas at this point
        if not _is_using_pandas(endog, None):
            endog = np.asanyarray(endog)

        # Model order
        # Used internally in various places
        _min_k_ar = max(self.k_ar, 1)
        self._k_order = _min_k_ar + self.k_ma

        # Number of states
        k_endog = endog.shape[1]
        k_posdef = k_endog
        k_states = k_endog * self._k_order

        # By default, initialize as stationary
        kwargs.setdefault('initialization', 'stationary')

        # By default, use LU decomposition
        kwargs.setdefault('inversion_method', INVERT_UNIVARIATE | SOLVE_LU)

        # Initialize the state space model
        super(VARMAX, self).__init__(
            endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
        )

        # Set as time-varying model if we have time-trend or exog
        if self.k_exog > 0 or (self.k_trend > 0 and not self._trend_is_const):
            self.ssm._time_invariant = False

        # Initialize the parameters
        self.parameters = OrderedDict()
        self.parameters['trend'] = self.k_endog * self.k_trend
        self.parameters['ar'] = self.k_endog**2 * self.k_ar
        self.parameters['ma'] = self.k_endog**2 * self.k_ma
        self.parameters['regression'] = self.k_endog * self.k_exog
        if self.error_cov_type == 'diagonal':
            self.parameters['state_cov'] = self.k_endog
        # These parameters fill in a lower-triangular matrix which is then
        # dotted with itself to get a positive definite matrix.
        elif self.error_cov_type == 'unstructured':
            self.parameters['state_cov'] = (
                int(self.k_endog * (self.k_endog + 1) / 2)
            )
        self.parameters['obs_cov'] = self.k_endog * self.measurement_error
        self.k_params = sum(self.parameters.values())

        # Initialize trend data
        self._trend_data = prepare_trend_data(
            self.polynomial_trend, self.k_trend, self.nobs,
            offset=self.trend_offset)

        # Initialize known elements of the state space matrices

        # If we have exog effects, then the state intercept needs to be
        # time-varying
        if (self.k_trend > 0 and not self._trend_is_const) or self.k_exog > 0:
            self.ssm['state_intercept'] = np.zeros((self.k_states, self.nobs))
            # self.ssm['obs_intercept'] = np.zeros((self.k_endog, self.nobs))

        # The design matrix is just an identity for the first k_endog states
        idx = np.diag_indices(self.k_endog)
        self.ssm[('design',) + idx] = 1

        # The transition matrix is described in four blocks, where the upper
        # left block is in companion form with the autoregressive coefficient
        # matrices (so it is shaped k_endog * k_ar x k_endog * k_ar) ...
        if self.k_ar > 0:
            idx = np.diag_indices((self.k_ar - 1) * self.k_endog)
            idx = idx[0] + self.k_endog, idx[1]
            self.ssm[('transition',) + idx] = 1
        # ... and the lower right block is in companion form with zeros as the
        # coefficient matrices (it is shaped k_endog * k_ma x k_endog * k_ma).
        idx = np.diag_indices((self.k_ma - 1) * self.k_endog)
        idx = (idx[0] + (_min_k_ar + 1) * self.k_endog,
               idx[1] + _min_k_ar * self.k_endog)
        self.ssm[('transition',) + idx] = 1

        # The selection matrix is described in two blocks, where the upper
        # block selects all k_posdef errors in the first k_endog rows
        # (the upper block is shaped k_endog * k_ar x k) and the lower block
        # also selects all k_posdef errors in the first k_endog rows (the lower
        # block is shaped k_endog * k_ma x k).
        idx = np.diag_indices(self.k_endog)
        self.ssm[('selection',) + idx] = 1
        idx = idx[0] + _min_k_ar * self.k_endog, idx[1]
        if self.k_ma > 0:
            self.ssm[('selection',) + idx] = 1

        # Cache some indices
        if self._trend_is_const and self.k_exog == 0:
            self._idx_state_intercept = np.s_['state_intercept', :k_endog, :]
        elif self.k_trend > 0 or self.k_exog > 0:
            self._idx_state_intercept = np.s_['state_intercept', :k_endog, :-1]
        if self.k_ar > 0:
            self._idx_transition = np.s_['transition', :k_endog, :]
        else:
            self._idx_transition = np.s_['transition', :k_endog, k_endog:]
        if self.error_cov_type == 'diagonal':
            self._idx_state_cov = (
                ('state_cov',) + np.diag_indices(self.k_endog))
        elif self.error_cov_type == 'unstructured':
            self._idx_lower_state_cov = np.tril_indices(self.k_endog)
        if self.measurement_error:
            self._idx_obs_cov = ('obs_cov',) + np.diag_indices(self.k_endog)

        # Cache some slices
        def _slice(key, offset):
            length = self.parameters[key]
            param_slice = np.s_[offset:offset + length]
            offset += length
            return param_slice, offset

        offset = 0
        self._params_trend, offset = _slice('trend', offset)
        self._params_ar, offset = _slice('ar', offset)
        self._params_ma, offset = _slice('ma', offset)
        self._params_regression, offset = _slice('regression', offset)
        self._params_state_cov, offset = _slice('state_cov', offset)
        self._params_obs_cov, offset = _slice('obs_cov', offset)

        # Update _init_keys attached by super
        self._init_keys += ['order', 'trend', 'error_cov_type',
                            'measurement_error', 'enforce_stationarity',
                            'enforce_invertibility'] + list(kwargs.keys())
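The transition-matrix setup above uses diag_indices with a row offset to place an identity block below the coefficient rows. A tiny standalone sketch of the same companion-form construction (k_endog=2 and k_ar=2 are illustrative values, not defaults of the model):

import numpy as np

k_endog, k_ar = 2, 2
T = np.zeros((k_endog * k_ar, k_endog * k_ar))
idx = np.diag_indices((k_ar - 1) * k_endog)
idx = (idx[0] + k_endog, idx[1])   # shift the identity block below the first k_endog rows
T[idx] = 1
print(T)
# The first k_endog rows stay free for the AR coefficient matrices; the shifted
# identity block moves the state vector down one lag each period.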
Beispiel #41
0
def diag_indices(*args, **kwargs):
    return tuple(map(tf.convert_to_tensor, _np.diag_indices(*args, **kwargs)))
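A quick sketch of the thin wrapper above; TensorFlow 2.x eager execution and `_np` being numpy (as imported by the original module) are assumptions:

rows, cols = diag_indices(3)          # a tuple of two integer tensors
print(rows.numpy(), cols.numpy())     # [0 1 2] [0 1 2]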
def _logm_triu(T):
    """
    Compute matrix logarithm of an upper triangular matrix.

    The matrix logarithm is the inverse of
    expm: expm(logm(`T`)) == `T`

    Parameters
    ----------
    T : (N, N) array_like
        Upper triangular matrix whose logarithm to evaluate

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `T`

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197

    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7

    .. [3] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    """
    T = np.asarray(T)
    if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
        raise ValueError('expected an upper triangular square matrix')
    n, n = T.shape

    # Construct T0 with the appropriate type,
    # depending on the dtype and the spectrum of T.
    T_diag = np.diag(T)
    keep_it_real = (not _has_complex_dtype_char(T)) and (np.min(T_diag) >= 0)
    if keep_it_real:
        T0 = T
    else:
        T0 = T.astype(complex)

    # Define bounds given in Table (2.1).
    theta = (None, 1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2, 1.28e-1, 2.06e-1,
             2.88e-1, 3.67e-1, 4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1, 6.52e-1,
             6.89e-1, 7.21e-1, 7.49e-1)

    R, s, m = _inverse_squaring_helper(T0, theta)

    # Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1).
    # This requires the nodes and weights
    # corresponding to degree-m Gauss-Legendre quadrature.
    # These quadrature arrays need to be transformed from the [-1, 1] interval
    # to the [0, 1] interval.
    nodes, weights = scipy.special.p_roots(m)
    nodes = nodes.real
    if nodes.shape != (m, ) or weights.shape != (m, ):
        raise Exception('internal error')
    nodes = 0.5 + 0.5 * nodes
    weights = 0.5 * weights
    ident = np.identity(n)
    U = np.zeros_like(R)
    for alpha, beta in zip(weights, nodes):
        U += solve_triangular(ident + beta * R, alpha * R)
    U *= np.exp2(s)

    # Skip this step if the principal branch
    # does not exist at T0; this happens when a diagonal entry of T0
    # is negative with imaginary part 0.
    has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
    if has_principal_branch:

        # Recompute diagonal entries of U.
        U[np.diag_indices(n)] = np.log(np.diag(T0))

        # Recompute superdiagonal entries of U.
        # The indexing in this code should be renovated
        # when a newer np.diagonal() becomes available.
        for i in range(n - 1):
            l1 = T0[i, i]
            l2 = T0[i + 1, i + 1]
            t12 = T0[i, i + 1]
            U[i, i + 1] = _logm_superdiag_entry(l1, l2, t12)

    # Return the logm of the upper triangular matrix.
    if not np.array_equal(U, np.triu(U)):
        raise Exception('internal inconsistency')
    return U
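This private helper appears to be the triangular core behind scipy.linalg.logm; a small verification sketch using only the public API (the matrix is an arbitrary upper triangular example with positive diagonal, so the principal logarithm exists):

import numpy as np
from scipy.linalg import expm, logm

T = np.array([[2.0, 1.0, 0.5],
              [0.0, 3.0, 1.0],
              [0.0, 0.0, 4.0]])
L = logm(T)
print(np.allclose(expm(L), T))                        # expm(logm(T)) == T
print(np.allclose(np.diag(L), np.log(np.diag(T))))    # diagonal entries are log of T's diagonal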
def _remainder_matrix_power_triu(T, t):
    """
    Compute a fractional power of an upper triangular matrix.

    The fractional power is restricted to fractions -1 < t < 1.
    This uses algorithm (3.1) of [1]_.
    The Pade approximation itself uses algorithm (4.1) of [2]_.

    Parameters
    ----------
    T : (N, N) array_like
        Upper triangular matrix whose fractional power to evaluate.
    t : float
        Fractional power between -1 and 1 exclusive.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2013)
           "An Improved Schur-Pade Algorithm for Fractional Powers
           of a Matrix and their Frechet Derivatives."

    .. [2] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    """
    m_to_theta = {
        1: 1.51e-5,
        2: 2.24e-3,
        3: 1.88e-2,
        4: 6.04e-2,
        5: 1.24e-1,
        6: 2.00e-1,
        7: 2.79e-1,
    }
    n, n = T.shape
    T0 = T
    T0_diag = np.diag(T0)
    if np.array_equal(T0, np.diag(T0_diag)):
        U = np.diag(T0_diag**t)
    else:
        R, s, m = _inverse_squaring_helper(T0, m_to_theta)

        # Evaluate the Pade approximation.
        # Note that this function expects the negative of the matrix
        # returned by the inverse squaring helper.
        U = _fractional_power_pade(-R, t, m)

        # Undo the inverse scaling and squaring.
        # Be less clever about this
        # if the principal branch does not exist at T0;
        # this happens when a diagonal entry of T0
        # is negative with imaginary part 0.
        eivals = np.diag(T0)
        has_principal_branch = all(x.real > 0 or x.imag != 0 for x in eivals)
        for i in range(s, -1, -1):
            if i < s:
                U = U.dot(U)
            else:
                if has_principal_branch:
                    p = t * np.exp2(-i)
                    U[np.diag_indices(n)] = T0_diag**p
                    for j in range(n - 1):
                        l1 = T0[j, j]
                        l2 = T0[j + 1, j + 1]
                        t12 = T0[j, j + 1]
                        f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
                        U[j, j + 1] = f12
    if not np.array_equal(U, np.triu(U)):
        raise Exception('internal inconsistency')
    return U
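Similarly, the fractional-power helper above appears to sit behind scipy.linalg.fractional_matrix_power; a short check with the public API on an upper triangular example (values chosen only for illustration):

import numpy as np
from scipy.linalg import fractional_matrix_power

T = np.array([[4.0, 2.0],
              [0.0, 9.0]])
X = fractional_matrix_power(T, 0.5)
print(np.allclose(X @ X, T))                          # X is a matrix square root of T
print(np.allclose(np.diag(X), np.diag(T) ** 0.5))     # diagonal entries are T_ii**t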
Beispiel #44
0
PARAMS = {
    "batch_size": 1000,
    "keywords": 300,
    "latent_dim": 2,
    "sigma": 1,
    "epochs": 2, 
    "beta": 1, 
    # "kl_anneal_rate": 0.05,
    # "logistic_anneal": True,
    "learning_rate": 0.05,
}

with open("./result/keywords.txt", "r") as f:
    keywords = [w.strip('\n') for w in f.readlines()]
#%%
di = np.diag_indices(PARAMS['keywords'])
filelist = sorted([f for f in os.listdir('/Users/anseunghwan/Documents/uos/textmining/data/') if f.endswith('.npz')])
# testn = np.argmin(np.array([os.path.getsize('/Users/anseunghwan/Documents/uos/textmining/data/' + f) for f in filelist]))
testn = len(filelist)-1

I = np.eye(PARAMS['keywords'])
'''validation(test)'''
Atest_ = sparse.load_npz('/Users/anseunghwan/Documents/uos/textmining/data/Atest.npz')
Atest = Atest_.toarray().reshape((-1, PARAMS['keywords'], PARAMS['keywords']))

'''degree matrix (every node connected to itself)'''
# D = I[None, :, :] * np.sqrt(1 / np.sum(Atest_[:, di[0], di[1]], axis=-1))[:, None, None]
D = Atest[:, di[0], di[1]] * np.sqrt(1 / np.sum(Atest[:, di[0], di[1]], axis=-1, keepdims=True))
D[np.where(D == 0)] = 1
D = I[None, :, :] * D[:, None]
Atest[:, di[0], di[1]] = 1 # diagonal element
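For reference, the usual target of this kind of preprocessing is the symmetric normalization A_hat = D^{-1/2} (A + I) D^{-1/2}; a generic sketch of that normalization (not tied to the data-specific scaling above) follows:

import numpy as np

def normalize_adjacency(A):
    # GCN-style symmetric normalization with self-loops added.
    A_tilde = A + np.eye(A.shape[0])
    deg = A_tilde.sum(axis=1)
    d_inv_sqrt = 1.0 / np.sqrt(np.where(deg > 0, deg, 1.0))
    return A_tilde * d_inv_sqrt[:, None] * d_inv_sqrt[None, :]

A_toy = np.array([[0., 1., 0.],
                  [1., 0., 1.],
                  [0., 1., 0.]])
print(normalize_adjacency(A_toy))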
def get_frag(c: cooler.api.Cooler,
             resolution: int,
             offsets: pd.core.series.Series,
             chrom1: str,
             start1: int,
             end1: int,
             chrom2: str,
             start2: int,
             end2: int,
             width: int = 22,
             height: int = -1,
             padding: int = 10,
             normalize: bool = True,
             balanced: bool = True,
             percentile: float = 100.0,
             ignore_diags: int = 0,
             no_normalize: bool = False) -> np.ndarray:
    """
    Retrieves a matrix fragment.

    Args:
        c:
            Cooler object.
        chrom1:
            Chromosome 1. E.g.: `1` or `chr1`.
        start1:
            First start position in base pairs relative to `chrom1`.
        end1:
            First end position in base pairs relative to `chrom1`.
        chrom2:
            Chromosome 2. E.g.: `1` or `chr1`.
        start2:
            Second start position in base pairs relative to `chrom2`.
        end2:
            Second end position in base pairs relative to `chrom2`.
        offsets:
            Pandas Series of chromosome offsets in bins.
        width:
            Width of the fragment in pixels.
        height:
            Height of the fragments in pixels. If `-1` `height` will equal
            `width`. Defaults to `-1`.
        padding: Percentage padding relative to the dimension of the fragment.
            E.g., 10 = 10% padding (5% per side). Defaults to `10`.
        normalize:
            If `True` the fragment will be normalized to [0, 1].
            Defaults to `True`.
        balanced:
            If `True` the fragment will be balanced using Cooler.
            Defaults to `True`.
        percentile:
            Percentile clip. E.g., For 99 the maximum will be
            capped at the 99-percentile. Defaults to `100.0`.
        ignore_diags:
            Number of diagonals to be ignored, i.e., set to 0.
            Defaults to `0`.
        no_normalize:
            If `true` the returned matrix is not normalized.
            Defaults to `False`.

    Returns:
        The extracted matrix fragment as a 2D numpy array.
    """

    if height == -1:
        height = width

    # Restrict padding to be [0, 100]%
    padding = min(100, max(0, padding)) / 100

    # Normalize chromosome names
    if not chrom1.startswith('chr'):
        chrom1 = 'chr{}'.format(chrom1)
    if not chrom2.startswith('chr'):
        chrom2 = 'chr{}'.format(chrom2)

    # Get chromosome offset
    offset1 = offsets[chrom1]
    offset2 = offsets[chrom2]

    start_bin1 = offset1 + int(round(float(start1) / resolution))
    end_bin1 = offset1 + int(round(float(end1) / resolution)) + 1

    start_bin2 = offset2 + int(round(float(start2) / resolution))
    end_bin2 = offset2 + int(round(float(end2) / resolution)) + 1

    # Apply percental padding
    padding1 = int(round(((end_bin1 - start_bin1) / 2) * padding))
    padding2 = int(round(((end_bin2 - start_bin2) / 2) * padding))
    start_bin1 -= padding1
    start_bin2 -= padding2
    end_bin1 += padding1
    end_bin2 += padding2

    # Get the size of the region
    dim1 = end_bin1 - start_bin1
    dim2 = end_bin2 - start_bin2

    # Get additional absolute padding if needed
    padding1 = 0
    if dim1 < width:
        padding1 = int((width - dim1) / 2)
        start_bin1 -= padding1
        end_bin1 += padding1

    padding2 = 0
    if dim2 < height:
        padding2 = int((height - dim2) / 2)
        start_bin2 -= padding2
        end_bin2 += padding2

    # In case the final dimension does not match the desired dimension we
    # increase the end bin. This can happen when the padding is not
    # divisible by 2, since the padding is rounded to the nearest integer.
    abs_dim1 = abs(start_bin1 - end_bin1)
    if abs_dim1 < width:
        end_bin1 += width - abs_dim1
        abs_dim1 = width

    abs_dim2 = abs(start_bin2 - end_bin2)
    if abs_dim2 < height:
        end_bin2 += height - abs_dim2
        abs_dim2 = height

    # Maximum width / height is 512
    if abs_dim1 > hss.SNIPPET_MAT_MAX_DATA_DIM: raise SnippetTooLarge()
    if abs_dim2 > hss.SNIPPET_MAT_MAX_DATA_DIM: raise SnippetTooLarge()

    # Finally, adjust for negative start values.
    # Since relative bin IDs are adjusted by the start, this will lead to a
    # white offset.
    real_start_bin1 = start_bin1 if start_bin1 >= 0 else 0
    real_start_bin2 = start_bin2 if start_bin2 >= 0 else 0

    # Get the data
    data = c.matrix(as_pixels=True, balance=False,
                    max_chunk=np.inf)[real_start_bin1:end_bin1,
                                      real_start_bin2:end_bin2]

    # Annotate pixels for balancing
    bins = c.bins(convert_enum=False)[['weight']]
    data = cooler.annotate(data, bins, replace=False)

    # Calculate relative bin IDs
    rel_bin1 = np.add(data['bin1_id'].values, -start_bin1)
    rel_bin2 = np.add(data['bin2_id'].values, -start_bin2)

    # Balance counts
    if balanced:
        values = data['count'].values.astype(np.float32)
        values *= data['weight1'].values * data['weight2'].values
    else:
        values = data['count'].values

    # Get pixel IDs for the upper triangle
    idx1 = np.add(np.multiply(rel_bin1, abs_dim1), rel_bin2)

    # Mirror matrix
    idx2_1 = np.add(data['bin2_id'].values, -start_bin1)
    idx2_2 = np.add(data['bin1_id'].values, -start_bin2)
    idx2 = np.add(np.multiply(idx2_1, abs_dim1), idx2_2)
    validBins = np.where((idx2_1 < abs_dim1) & (idx2_2 >= 0))

    # Ignore diagonals
    diags_start_row = None
    if ignore_diags > 0:
        try:
            diags_start_idx = np.min(
                np.where(data['bin1_id'].values == data['bin2_id'].values))
            diags_start_row = (rel_bin1[diags_start_idx] -
                               rel_bin2[diags_start_idx])
        except ValueError:
            pass

    # Copy pixel values onto the final array
    frag_len = abs_dim1 * abs_dim2
    frag = np.zeros(frag_len, dtype=np.float32)
    # Make sure we're within the bounds
    idx1_f = np.where(idx1 < frag_len)
    frag[idx1[idx1_f]] = values[idx1_f]
    frag[idx2[validBins]] = values[validBins]
    frag = frag.reshape((abs_dim1, abs_dim2))

    # Store low quality bins
    low_quality_bins = np.where(np.isnan(frag))

    # Assign 0 for now to avoid influencing the max values
    frag[low_quality_bins] = 0

    # Scale fragment down if needed
    scaled = False
    scale_x = width / frag.shape[0]
    if frag.shape[0] > width or frag.shape[1] > height:
        scaledFrag = np.zeros((width, height), float)
        frag = scaledFrag + zoomArray(frag, scaledFrag.shape, order=1)
        scaled = True

    # Normalize by minimum
    if not no_normalize:
        min_val = np.min(frag)
        frag -= min_val

    ignored_idx = None

    # Remove diagonals
    if ignore_diags > 0 and diags_start_row is not None:
        if width == height:
            scaled_row = int(np.rint(diags_start_row / scale_x))

            idx = np.diag_indices(width)
            scaled_idx = (idx if scaled_row == 0 else
                          [idx[0][scaled_row:], idx[0][:-scaled_row]])

            for i in range(ignore_diags):

                # First set all cells to be ignored to `-1` so that we can
                # easily query for them later.
                if i == 0:
                    frag[scaled_idx] = -1
                else:
                    dist_to_diag = scaled_row - i
                    dist_neg = min(0, dist_to_diag)
                    off = 0 if dist_to_diag >= 0 else i - scaled_row

                    # Above diagonal
                    frag[((scaled_idx[0] - i)[off:],
                          (scaled_idx[1])[off:])] = -1

                    # Extra cutoff at the bottom right
                    frag[(range(
                        scaled_idx[0][-1] - i,
                        scaled_idx[0][-1] + 1 + dist_neg,
                    ),
                          range(scaled_idx[1][-1],
                                scaled_idx[1][-1] + i + 1 + dist_neg))] = -1

                    # Below diagonal
                    frag[((scaled_idx[0] + i)[:-i], (scaled_idx[1])[:-i])] = -1

            # Save the final selection of ignored cells for fast access
            # later and set those values to `0` now.
            ignored_idx = np.where(frag == -1)
            frag[ignored_idx] = 0

        else:
            logger.warn(
                'Ignoring the diagonal is only supported for square features')

    # Cap at the given percentile
    max_val = np.percentile(frag, percentile)
    frag = np.clip(frag, 0, max_val)

    # Normalize by maximum
    if not no_normalize and max_val > 0:
        frag /= max_val

    # Set the ignored diagonal to the maximum
    if ignored_idx:
        frag[ignored_idx] = 1.0

    if not scaled:
        # Recover low quality bins
        frag[low_quality_bins] = -1

    return frag
def main(event):

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--apath',
        type=str,
        default='/keilholz-lab/Jacob/TDABrains_00/data/FCStateClassif/anat/')
    parser.add_argument('--poolSize', type=int, default=14)
    parser.add_argument('--nVols', type=int, default=-1)
    parser.add_argument('--display', type=bool, default=False)

    args = parser.parse_args()

    apath_rest = args.apath
    apath_task = args.apath

    vloc = args.apath
    volunteers, restDirs, taskDirs, EVtxt, Blocks, Blockstxt = p_utils.getLists(
        vloc=vloc)
    random.shuffle(volunteers)

    # saveloc
    saveloc = './results/'

    poolSize = args.poolSize

    maxdim = p_utils.maxdim
    metrics = p_utils.metrics

    train_volloc = saveloc + 'ind0/'

    # Make table of existing data
    # indices pulled from train_volloc as previous step (p3_....py) runs over all available volunteers
    all_times_vols = p_utils.getSecondList(train_volloc)
    allInds = []
    volInds = {}
    allNames = []
    allNames_dict = {}
    dict_count = -1
    dLen = 0
    for voln in tqdm(all_times_vols, desc='Loading timing info.'):
        allInds.append(
            np.load(train_volloc + str(voln) + '.npy', allow_pickle=True))
        volInds[voln] = np.empty(allInds[-1].shape, dtype='int')
        for i in allInds[-1]:
            dict_count += 1
            allNames.append((voln[:5] + '_{:03d}').format(i))
            allNames_dict[allNames[-1]] = dict_count
            volInds[voln][i] = dict_count
    dLen = i + 1
    lan = len(allNames)
    san = set(allNames)

    # Also load data
    UUs = {}
    for metric in metrics:
        UUs[metric] = []
        for voln in tqdm(all_times_vols, desc='Loading data'):
            if metric == 'diagram':
                graphType = 'ripped'
            elif metric == 'simplex':
                graphType = 'simplex'
            locU = saveloc + '{}Training/'.format(graphType)
            with open(locU + str(voln) + '.pkl', 'rb') as file:
                uus = pickle.load(file)
            for uu in uus:
                if metric == 'diagram':
                    UUs[metric].append(uu['dgms'])
                if metric == 'simplex':
                    UUs[metric].append(uu)
    print('UUs[-1] is shape {}'.format(UUs[metric][-1].shape))
    nP = p_utils.knn
    nW = 1
    pool = Pool(poolSize, p_utils.getMultiscaleIndexerRanges, (
        nP,
        nW,
    ))

    # Initialize data Mat
    dataMat = {}
    for metric in metrics:
        for dim in range(maxdim + 1):
            dataMat[dim] = np.full([lan, lan], -1.0)
            dataMat[dim][np.diag_indices(lan)] = 0

    # Load finished distances
    for metric in metrics:

        if metric == 'diagram':
            dist_fun = partial(p_utils.slw2, normalize=False)
            desc = 'calc Wasserstein distance'
        elif metric == 'simplex':
            dist_fun = partial(p_utils.calcWeightedJaccard)
            desc = 'calc connected simplices'

        locBeg = saveloc + '{}AllDist_HN/Begun/'.format(metric)
        makedirs(locBeg, exist_ok=True)
        begun = p_utils.getSecondList(locBeg)

        locFin = saveloc + '{}AllDist_HN/Finished/'.format(metric)
        makedirs(locFin, exist_ok=True)
        finished = p_utils.getSecondList(locFin)
        for fin in tqdm(finished):
            fname = locBeg + fin + '.npy'
            otherInds = np.load(fname)
            fname = locFin + fin + '.npy'
            tempD = np.load(fname)
            for dim in range(maxdim + 1):
                dataMat[dim][allNames_dict[fin],
                             otherInds] = tempD[:, dim].ravel()
                dataMat[dim][otherInds,
                             allNames_dict[fin]] = tempD[:, dim].ravel()

        remaining = san - set(finished) - set(begun)

        while len(remaining) and not event.is_set():
            print('finished {}'.format(-len(remaining) + lan))
            print('remaining Vol x Times, {} of {}.'.format(
                len(remaining), lan))

            lineLbl = random.choice(tuple(remaining))
            lineNum = allNames_dict[lineLbl]
            otherInds = np.arange(lan)[(dataMat[0][lineNum, :] == -1).ravel()]

            np.save(locBeg + lineLbl + '.npy', otherInds)

            #print('printing dataMat histograms')
            #for dim in range(maxdim+1):
            #    print(np.histogram(dataMat[dim].ravel()))

            print('For datapoint {}, # remaining inds = {}.'.format(
                lineLbl, len(otherInds)))

            tempD = list(
                pool.imap(dist_fun,
                          tqdm([[UUs[metric][lineNum], UUs[metric][oi]]
                                for oi in otherInds],
                               total=len(otherInds),
                               desc=desc),
                          chunksize=1))
            #tempD = [[-1]*len(Inds)]*3
            tempD = np.array(tempD)

            print('Saving results for line {}'.format(lineLbl))
            fname = locFin + lineLbl + '.npy'
            np.save(fname, tempD)

            newly_finished = p_utils.getSecondList(locFin)

            for fin in list(set(newly_finished) - set(finished)):
                fname = locBeg + fin + '.npy'
                otherInds = np.load(fname)
                fname = locFin + fin + '.npy'
                tempD = np.load(fname)
                for dim in range(maxdim + 1):
                    dataMat[dim][allNames_dict[fin],
                                 otherInds] = tempD[:, dim].ravel()
                    dataMat[dim][otherInds,
                                 allNames_dict[fin]] = tempD[:, dim].ravel()

            finished = newly_finished
            begun = p_utils.getSecondList(locBeg)
            remaining = san - set(finished) - set(begun)

        print('finished {}'.format(-len(remaining) + lan))
        print('We done with {}'.format(metric))

    return
Beispiel #47
0
 def fn(x, P_acc):
     x_ = K.zeros((self.nb_actions, self.nb_actions))
     x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], x)
     return x_
Beispiel #48
0
import numpy as np
from scipy.optimize import minimize
from sklearn.linear_model import Ridge

# Back-projection matrix
lbp_B = np.loadtxt(r'D:\Proj\EIT\EIT-py\data\lbp_B_python.csv', delimiter=",")

# Sensitivity matrix
S = np.loadtxt(r'D:\Proj\EIT\EIT-py\data\jacobian_python.csv', delimiter=",")
inv_S = np.linalg.pinv(S)

# Sensitivity normalization: the A matrix
W = np.zeros((576, 576))
S2 = ((S**2).sum(axis=0))**(-0.5)
row, col = np.diag_indices(W.shape[0])
W[row, col] = S2
A = np.dot(W, np.linalg.pinv(np.dot(S, W)))

# Initial value for the iterative computation
x0 = np.zeros(576)


def lbp(proj_d):
    """线性反投影算法"""
    #将28个边界值补全(8*7)
    proj56 = np.hstack([
        proj_d[0:7], proj_d[7:13], proj_d[0], proj_d[13:18], proj_d[1],
        proj_d[7], proj_d[18:22], proj_d[2], proj_d[8], proj_d[13],
        proj_d[22:25], proj_d[3], proj_d[9], proj_d[14], proj_d[18],
        proj_d[25:27], proj_d[4], proj_d[10], proj_d[15], proj_d[19],
        proj_d[22], proj_d[27], proj_d[5], proj_d[11], proj_d[16], proj_d[20],
Beispiel #49
0
def cholesky_eri_b(mol, erifile, auxbasis='weigend+etb', dataname='eri_mo',
                   int3c='cint3c2e_sph', aosym='s2ij', int2c='cint2c2e_sph',
                   comp=1, ioblk_size=256, verbose=logger.NOTE):
    '''3-center 2-electron AO integrals
    '''
    assert(aosym in ('s1', 's2ij'))
    time0 = (time.clock(), time.time())
    if isinstance(verbose, logger.Logger):
        log = verbose
    else:
        log = logger.Logger(mol.stdout, verbose)
    auxmol = incore.format_aux_basis(mol, auxbasis)
    j2c = incore.fill_2c2e(mol, auxmol, intor=int2c)
    log.debug('size of aux basis %d', j2c.shape[0])
    time1 = log.timer('2c2e', *time0)
    try:
        low = scipy.linalg.cholesky(j2c, lower=True)
    except scipy.linalg.LinAlgError:
        j2c[numpy.diag_indices(j2c.shape[1])] += 1e-14
        low = scipy.linalg.cholesky(j2c, lower=True)
    j2c = None
    time1 = log.timer('Cholesky 2c2e', *time1)

    if h5py.is_hdf5(erifile):
        feri = h5py.File(erifile)
        if dataname in feri:
            del(feri[dataname])
    else:
        feri = h5py.File(erifile, 'w')
    for icomp in range(comp):
        feri.create_group('%s/%d'%(dataname,icomp)) # for h5py old version

    def store(b, label):
        cderi = scipy.linalg.solve_triangular(low, b, lower=True, overwrite_b=True)
        if cderi.flags.f_contiguous:
            cderi = lib.transpose(cderi.T)
        feri[label] = cderi

    atm, bas, env, ao_loc = incore._env_and_aoloc(int3c, mol, auxmol)
    nao = ao_loc[mol.nbas]
    naoaux = ao_loc[-1] - nao
    if aosym == 's1':
        nao_pair = nao * nao
        buflen = min(max(int(ioblk_size*1e6/8/naoaux/comp), 1), nao_pair)
        shranges = _guess_shell_ranges(mol, buflen, 's1')
    else:
        nao_pair = nao * (nao+1) // 2
        buflen = min(max(int(ioblk_size*1e6/8/naoaux/comp), 1), nao_pair)
        shranges = _guess_shell_ranges(mol, buflen, 's2ij')
    log.debug('erifile %.8g MB, IO buf size %.8g MB',
              naoaux*nao_pair*8/1e6, comp*buflen*naoaux*8/1e6)
    if log.verbose >= logger.DEBUG1:
        log.debug1('shranges = %s', shranges)
    cintopt = gto.moleintor.make_cintopt(atm, bas, env, int3c)
    bufs1 = numpy.empty((comp*max([x[2] for x in shranges]),naoaux))

    for istep, sh_range in enumerate(shranges):
        log.debug('int3c2e [%d/%d], AO [%d:%d], nrow = %d', \
                  istep+1, len(shranges), *sh_range)
        bstart, bend, nrow = sh_range
        shls_slice = (bstart, bend, 0, mol.nbas, mol.nbas, mol.nbas+auxmol.nbas)
        buf = _ri.nr_auxe2(int3c, atm, bas, env, shls_slice, ao_loc,
                           aosym, comp, cintopt, bufs1)
        if comp == 1:
            store(buf.T, '%s/0/%d'%(dataname,istep))
        else:
            for icomp in range(comp):
                store(buf[icomp].T, '%s/%d/%d'%(dataname,icomp,istep))
        time1 = log.timer('gen CD eri [%d/%d]' % (istep+1,len(shranges)), *time1)
    buf = bufs1 = None

    feri.close()
    return erifile
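The try/except around the 2c2e Cholesky above is a common pattern: when a Gram-type matrix fails to factorize because of round-off, nudge its diagonal with diag_indices and retry. A generic NumPy sketch of the same idea (the jitter size is an illustrative choice):

import numpy as np

def cholesky_with_jitter(M, jitter=1e-10):
    try:
        return np.linalg.cholesky(M)
    except np.linalg.LinAlgError:
        M = M.copy()
        M[np.diag_indices(M.shape[0])] += jitter
        return np.linalg.cholesky(M)

# Indefinite by a hair: the plain factorization fails on this matrix.
G = np.array([[1.0, 1.0],
              [1.0, 1.0 - 1e-12]])
L = cholesky_with_jitter(G)
print(np.allclose(L @ L.T, G, atol=1e-8))   # True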
Beispiel #50
0
    def _update(self):
        def _isinvalid(x):
            return isnan(x) or isinf(x)

        # Update the displayed confusion matrix
        if self.results is not None and self.selected_learner:
            cmatrix = confusion_matrix(self.results, self.selected_learner[0])
            colsum = cmatrix.sum(axis=0)
            rowsum = cmatrix.sum(axis=1)
            n = len(cmatrix)
            diag = np.diag_indices(n)

            colors = cmatrix.astype(np.double)
            colors[diag] = 0
            if self.selected_quantity == 0:
                normalized = cmatrix.astype(int)
                formatstr = "{}"
                div = np.array([colors.max()])
            else:
                if self.selected_quantity == 1:
                    normalized = 100 * cmatrix / colsum
                    div = colors.max(axis=0)
                else:
                    normalized = 100 * cmatrix / rowsum[:, np.newaxis]
                    div = colors.max(axis=1)[:, np.newaxis]
                formatstr = "{:2.1f} %"
            div[div == 0] = 1
            colors /= div
            maxval = normalized[diag].max()
            if maxval > 0:
                colors[diag] = normalized[diag] / maxval

            for i in range(n):
                for j in range(n):
                    val = normalized[i, j]
                    col_val = colors[i, j]
                    item = self._item(i + 2, j + 2)
                    item.setData(
                        "NA" if _isinvalid(val) else formatstr.format(val),
                        Qt.DisplayRole)
                    bkcolor = QColor.fromHsl(
                        [0, 240][i == j], 160,
                        255 if _isinvalid(col_val) else int(255 -
                                                            30 * col_val))
                    item.setData(QBrush(bkcolor), Qt.BackgroundRole)
                    item.setData("trbl", BorderRole)
                    item.setToolTip("actual: {}\npredicted: {}".format(
                        self.headers[i], self.headers[j]))
                    item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
                    item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
                    self._set_item(i + 2, j + 2, item)

            bold_font = self.tablemodel.invisibleRootItem().font()
            bold_font.setBold(True)

            def _sum_item(value, border=""):
                item = QStandardItem()
                item.setData(value, Qt.DisplayRole)
                item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
                item.setFlags(Qt.ItemIsEnabled)
                item.setFont(bold_font)
                item.setData(border, BorderRole)
                item.setData(QColor(192, 192, 192), BorderColorRole)
                return item

            for i in range(n):
                self._set_item(n + 2, i + 2, _sum_item(int(colsum[i]), "t"))
                self._set_item(i + 2, n + 2, _sum_item(int(rowsum[i]), "l"))
            self._set_item(n + 2, n + 2, _sum_item(int(rowsum.sum())))
Beispiel #51
0
 def forward(self, x):
     # create diagonal matrices
     m = np.zeros((x.size * self.dim)).reshape(-1, self.dim, self.dim)
     x = x.reshape(-1, self.dim)
     m[(np.s_[:], ) + np.diag_indices(x.shape[1])] = x
     return m
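A small, self-contained sketch of the batched diagonal trick above: np.s_[:] prepended to diag_indices addresses the diagonal of every matrix in the batch at once (dim=3 is a hypothetical value for self.dim):

import numpy as np

dim = 3
x = np.arange(6, dtype=float)                       # two stacked vectors of length 3
m = np.zeros((x.size * dim)).reshape(-1, dim, dim)  # shape (2, 3, 3)
x = x.reshape(-1, dim)
m[(np.s_[:],) + np.diag_indices(dim)] = x
print(m[0])   # diag(0, 1, 2)
print(m[1])   # diag(3, 4, 5)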
Beispiel #52
0
def run_isoc_CE(objective,
                obs,
                obs_er,
                filters,
                refmag,
                prange,
                sample,
                itmax,
                band,
                alpha,
                tol,
                weight,
                prior,
                seed,
                nthreads=1,
                guess=False):

    # Define arrays to be used
    ndim = prange.shape[0]
    lik = np.zeros(sample)
    center = np.zeros([ndim, itmax])
    sigma = np.zeros([ndim, itmax])
    pars_best = 0
    avg_var = 1e3
    diag_ind = np.diag_indices(ndim)
    midpoint = (prange[:, 1] - prange[:, 0]) / 2. + prange[:, 0]

    # generate initial solution population
    pars = []
    for k in range(ndim):
        aux = np.random.uniform(prange[k, 0], prange[k, 1], sample)
        pars.append(aux)
    pars = np.array(pars)

    if (guess != False):
        pars[:, 0] = guess

    iter = 0

    # enforce prange limits
    for n in range(sample):
        ind_low = np.where(pars[:, n] - prange[:, 0] < 0.)
        pars[ind_low, n] = prange[ind_low, 0]
        ind_hi = np.where(pars[:, n] - prange[:, 1] > 0.)
        pars[ind_hi, n] = prange[ind_hi, 1]

    while (iter < itmax and avg_var > tol):

        ##########################################################################################
        #     run likelihood calculation in parallel
        pool = mp.Pool(processes=nthreads)
        res = [
            pool.apply_async(objective,
                             args=(
                                 theta,
                                 obs,
                                 obs_er,
                                 filters,
                                 refmag,
                                 prange,
                                 weight,
                                 prior,
                                 seed,
                             )) for theta in pars.T
        ]
        lik = np.array([p.get() for p in res])
        pool.close()
        pool.join()
        ###########################################################################################

        # sort solutions so the best one (smallest objective value, i.e. highest likelihood) comes first
        ind = np.argsort(lik)

        # best solution in iteration
        pars_best = np.copy(pars[:, ind[0]])
        lik_best = lik[ind[0]]
        # indices of the best (band * sample) solutions
        ind_best = ind[0:int(band * sample)]

        # discard solutions whose likelihood is not finite (outside the parameter space)
        ind_best = ind_best[np.isfinite(lik[ind_best])]

        # left here for now; this allows the convergence speed to be tuned
        beta = alpha  #* hill(iter,4.,0.5*itmax) + 0.01
        peso = np.resize(-lik, (ndim, sample))

        # calculate new proposal distribution
        if (iter == 0):
            center[:, iter] = np.nanmean(pars[:, ind_best], axis=1)
            covmat = np.cov(pars[:, ind_best])
        else:
            center[:, iter] = beta * np.nansum(pars[:, ind_best] * peso[:, ind_best], axis=1) \
                / np.nansum(peso[:, ind_best], axis=1) + (1. - beta) * center[:, iter - 1]

            covmat = beta * np.cov(pars[:, ind_best]) + (1. - beta) * covmat

        # check center variance in the last 10 iterations
        if (iter > 10):
            avg_var = np.max(
                np.std(center[:, iter - 10:iter], axis=1) / center[:, iter])

        if (covmat[2, 2] < 0.1):
            covmat[2, 2] = 0.2

        pars = np.random.multivariate_normal(center[:, iter], covmat, sample).T

        # enforce prange limits
        for n in range(sample):
            ind_low = np.where(pars[:, n] - prange[:, 0] < 0.)
            pars[ind_low, n] = midpoint[ind_low]
            ind_hi = np.where(pars[:, n] - prange[:, 1] > 0.)
            pars[ind_hi, n] = midpoint[ind_hi]

        # keep best solution
        pars[:, 0] = pars_best

        #        print('center:',center[:,iter])
        #        print(iter, avg_var, covmat[diag_ind])
        #        print('best:',lik_best,pars_best)
        #        print('')

        iter += 1

#        print 'Best solution'
    print('     '.join('%0.3f' % v for v in pars_best),
          "{0:0.2f}".format(lik_best), iter, "{0:0.5f}".format(avg_var))

    return pars_best
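For reference, a minimal sketch of the smoothed cross-entropy update that the loop above performs; the helper name ce_step and its elite_frac/beta defaults are illustrative assumptions, not part of run_isoc_CE:

import numpy as np

def ce_step(pars, scores, center, covmat, elite_frac=0.3, beta=0.7):
    # one smoothed cross-entropy iteration: refit a Gaussian proposal to the
    # elite samples and blend it with the previous center/covariance
    ind = np.argsort(scores)                                  # best (smallest score) first
    elite = pars[:, ind[:int(elite_frac * pars.shape[1])]]
    new_center = beta * elite.mean(axis=1) + (1. - beta) * center
    new_covmat = beta * np.cov(elite) + (1. - beta) * covmat
    new_pars = np.random.multivariate_normal(new_center, new_covmat, pars.shape[1]).T
    return new_pars, new_center, new_covmat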
Beispiel #53
0
    def _newton_rhaphson(
        self,
        df,
        events,
        start,
        stop,
        weights,
        show_progress=False,
        step_size=None,
        precision=10e-6,
        max_steps=50,
        initial_point=None,
    ):  # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
        """
        Newton Rhaphson algorithm for fitting CPH model.

        Parameters
        ----------
        df: DataFrame
        stop_times_events: DataFrame
             meta information about the subjects history
        show_progress: bool, optional (default: True)
            to show verbose output of convergence
        step_size: float
            > 0 to determine a starting step size in NR algorithm.
        precision: float
            the convergence halts if the norm of delta between
                     successive positions is less than epsilon.

        Returns
        --------
        beta: (1,d) numpy array.
        """
        assert precision <= 1.0, "precision must be less than or equal to 1."

        # soft penalizer functions, from https://www.cs.ubc.ca/cgi-bin/tr/2009/TR-2009-19.pdf
        soft_abs = lambda x, a: 1 / a * (anp.logaddexp(0, -a * x) + anp.logaddexp(0, a * x))
        penalizer = (
            lambda beta, a: n
            * 0.5
            * (
                self.l1_ratio * (self.penalizer * soft_abs(beta, a)).sum()
                + (1 - self.l1_ratio) * (self.penalizer * beta ** 2).sum()
            )
        )
        d_penalizer = elementwise_grad(penalizer)
        dd_penalizer = elementwise_grad(d_penalizer)

        n, d = df.shape

        # make sure betas are correct size.
        if initial_point is not None:
            beta = initial_point
        else:
            beta = np.zeros((d,))

        i = 0
        converging = True
        ll, previous_ll = 0, 0
        start_time = time.time()

        step_sizer = StepSizer(step_size)
        step_size = step_sizer.next()

        while converging:
            i += 1

            if self.strata is None:
                h, g, ll = self._get_gradients(df.values, events.values, start.values, stop.values, weights.values, beta)
            else:
                g = np.zeros_like(beta)
                h = np.zeros((d, d))
                ll = 0
                for _h, _g, _ll in self._partition_by_strata_and_apply(
                    df, events, start, stop, weights, self._get_gradients, beta
                ):
                    g += _g
                    h += _h
                    ll += _ll

            if i == 1 and np.all(beta == 0):
                # this is a neat optimization, the null partial likelihood
                # is the same as the full partial but evaluated at zero.
                # if the user supplied a non-trivial initial point, we need to delay this.
                self._log_likelihood_null = ll

            if isinstance(self.penalizer, np.ndarray) or self.penalizer > 0:
                ll -= penalizer(beta, 1.5 ** i)
                g -= d_penalizer(beta, 1.5 ** i)
                h[np.diag_indices(d)] -= dd_penalizer(beta, 1.5 ** i)

            try:
                # reusing a piece to make g * inv(h) * g.T faster later
                inv_h_dot_g_T = spsolve(-h, g, sym_pos=True)
            except ValueError as e:
                if "infs or NaNs" in str(e):
                    raise ConvergenceError(
                        """hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
                        e,
                    )
                else:
                    # something else?
                    raise e
            except LinAlgError as e:
                raise ConvergenceError(
                    """Convergence halted due to matrix inversion problems. Suspicion is high colinearity. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
                    e,
                )

            delta = step_size * inv_h_dot_g_T

            if np.any(np.isnan(delta)):
                raise ConvergenceError(
                    """delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
"""
                )
            # Save these as pending result
            hessian, gradient = h, g
            norm_delta = norm(delta)
            newton_decrement = g.dot(inv_h_dot_g_T) / 2

            if show_progress:
                print(
                    "\rIteration %d: norm_delta = %.5f, step_size = %.5f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f"
                    % (i, norm_delta, step_size, ll, newton_decrement, time.time() - start_time),
                    end="",
                )

            # convergence criteria
            if norm_delta < precision:
                converging, completed = False, True
            elif previous_ll > 0 and abs(ll - previous_ll) / (-previous_ll) < 1e-09:
                # this is what R uses by default
                converging, completed = False, True
            elif newton_decrement < 10e-8:
                converging, completed = False, True
            elif i >= max_steps:
                # 50 iterations steps with N-R is a lot.
                # Expected convergence is less than 10 steps
                converging, completed = False, False
            elif step_size <= 0.0001:
                converging, completed = False, False
            elif abs(ll) < 0.0001 and norm_delta > 1.0:
                warnings.warn(
                    "The log-likelihood is getting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. \
See https://stats.stackexchange.com/questions/11109/how-to-deal-with-perfect-separation-in-logistic-regression\n",
                    ConvergenceWarning,
                )
                converging, completed = False, False

            step_size = step_sizer.update(norm_delta).next()

            beta += delta

        self._hessian_ = hessian
        self._score_ = gradient
        self.log_likelihood_ = ll

        if show_progress and completed:
            print("Convergence completed after %d iterations." % (i))
        elif show_progress and not completed:
            print("Convergence failed. See any warning messages.")

        # report to the user problems that we detect.
        if completed and norm_delta > 0.1:
            warnings.warn(
                "Newton-Rhapson convergence completed but norm(delta) is still high, %.3f. This may imply non-unique solutions to the maximum likelihood. Perhaps there is colinearity or complete separation in the dataset?"
                % norm_delta,
                ConvergenceWarning,
            )
        elif not completed:
            warnings.warn("Newton-Rhapson failed to converge sufficiently in %d steps." % max_steps, ConvergenceWarning)

        return beta
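As an aside, the in-place diagonal update on the Hessian above (h[np.diag_indices(d)] -= ...) is the generic pattern for folding an elementwise penalty into a Newton step. A hedged sketch with a plain L2 penalty instead of lifelines' soft-abs penalizer:

import numpy as np

def penalized_newton_step(beta, grad, hess, penalty=0.1):
    # one Newton step on a concave objective with an L2 penalty 0.5*penalty*||beta||^2
    # subtracted from it (grad/hess are those of the unpenalized objective)
    g = grad - penalty * beta
    h = hess.copy()
    h[np.diag_indices(len(beta))] -= penalty       # penalty's second derivative on the diagonal
    delta = np.linalg.solve(-h, g)                 # same -h convention as in the code above
    return beta + delta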
Beispiel #54
0
            raise RuntimeError('corrupted result file (PdB len mismatch)')

        pidxRange = np.where(resf['results']['Epsilon'] == 0)[0]

# some debug output
print('Channel ID = {}\nSavefile = {}\nNum Tasks = {}'.format(
    cidx, outfile, len(PdB)))

if len(pidxRange) != len(PdB):
    print('Remaining Tasks = {}\n\n'.format(len(pidxRange)))
else:
    print('\n\n')
sys.stdout.flush()

# start slaving away
dix = np.diag_indices(numUE)
t = WSEE(mu, Pc)

beta = np.asarray(heff[cidx], dtype=np.double)  # full channel-gain matrix for channel realization cidx
alpha = beta[dix]                               # direct-link gains (diagonal entries)
beta[dix] = 0                                   # keep only cross-link (interference) gains
t.setChan(alpha, beta)

for pidx in pidxRange:
    print((30 * '=' + ' PdB = {} ({}/{}) ' + 30 * '=').format(
        PdB[pidx], pidx, len(PdB)))
    sys.stdout.flush()

    t.setPmax(Plin[pidx])
    t.optimize()
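The alpha/beta split above is a common np.diag_indices idiom (diagonal = direct gains, off-diagonal = interference). A standalone sketch that does the same split without modifying its input:

import numpy as np

def split_diagonal(mat):
    # return (diagonal entries, copy of mat with a zeroed diagonal)
    off_diag = np.array(mat, dtype=np.double)
    di = np.diag_indices(off_diag.shape[0])
    diag = off_diag[di].copy()
    off_diag[di] = 0
    return diag, off_diag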
Beispiel #55
0
def add_diagonal_limit(mat, val, max_size):
    # add `val` in place to at most the first `max_size` diagonal entries of `mat`
    di = np.diag_indices(min(min(mat.shape), max_size), mat.ndim)
    mat[di] += val
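A quick usage sketch (array shape and values are illustrative):

import numpy as np

A = np.zeros((4, 3))
add_diagonal_limit(A, 1.0, max_size=2)   # only A[0, 0] and A[1, 1] are incremented
# A is now [[1., 0., 0.],
#           [0., 1., 0.],
#           [0., 0., 0.],
#           [0., 0., 0.]]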
Beispiel #56
0
if sim_case == 0:
    # simple two-node network (0 --> 1)
    p                           = 2
    adj_mat                     = np.zeros((p,p))
    adj_mat[0,1]                = 1
elif sim_case == 1:
    # transitive three-node network (0 --> 1 --> 2)
    p                           = 3
    adj_mat                     = np.zeros((p,p))
    adj_mat[0,1] = adj_mat[1,2] = 1
else:
    # large sparse/weakly-coupled random network (potentially recurrent)
    p                           = 5
    sparse                      = 0.2
    adj_mat                     = np.zeros((p**2))
    adj_mat[0:int(sparse*p**2)] = 1
    np.random.shuffle(adj_mat)
    adj_mat                     = adj_mat.reshape((p,p))
    adj_mat[np.diag_indices(p)] = 0


connection_strength     = 1.0

time_step               = 0.001
time_period             = 5.0
time_range              = np.arange(-time_period / 2, time_period / 2, time_step)
ntime_points            = int(time_period / time_step)
padding_window          = 50

simulation_params       = {'network'                : adj_mat,
                           'connection_strength'    : connection_strength,
                           'time_step'              : time_step,
                           'time_period'            : time_period,
                           'padding'                : padding_window}
Beispiel #57
0
def trans_rdm1(myci, cibra, ciket, nmo=None, nocc=None):
    r'''
    One-particle spin density matrices dm1a, dm1b in MO basis (the
    occupied-virtual blocks due to the orbital response contribution are not
    included).

    dm1a[p,q] = <q_alpha^\dagger p_alpha>
    dm1b[p,q] = <q_beta^\dagger p_beta>

    The convention of 1-pdm is based on McWeeny's book, Eq (5.4.20).
    '''
    if nmo is None: nmo = myci.nmo
    if nocc is None: nocc = myci.nocc
    c0bra, c1bra, c2bra = myci.cisdvec_to_amplitudes(cibra,
                                                     nmo,
                                                     nocc,
                                                     copy=False)
    c0ket, c1ket, c2ket = myci.cisdvec_to_amplitudes(ciket,
                                                     nmo,
                                                     nocc,
                                                     copy=False)

    nmoa, nmob = nmo
    nocca, noccb = nocc
    bra1a, bra1b = c1bra
    bra2aa, bra2ab, bra2bb = c2bra
    ket1a, ket1b = c1ket
    ket2aa, ket2ab, ket2bb = c2ket

    dvoa = c0bra.conj() * ket1a.T
    dvob = c0bra.conj() * ket1b.T
    dvoa += numpy.einsum('jb,ijab->ai', bra1a.conj(), ket2aa)
    dvoa += numpy.einsum('jb,ijab->ai', bra1b.conj(), ket2ab)
    dvob += numpy.einsum('jb,ijab->ai', bra1b.conj(), ket2bb)
    dvob += numpy.einsum('jb,jiba->ai', bra1a.conj(), ket2ab)

    dova = c0ket * bra1a.conj()
    dovb = c0ket * bra1b.conj()
    dova += numpy.einsum('jb,ijab->ia', ket1a.conj(), bra2aa)
    dova += numpy.einsum('jb,ijab->ia', ket1b.conj(), bra2ab)
    dovb += numpy.einsum('jb,ijab->ia', ket1b.conj(), bra2bb)
    dovb += numpy.einsum('jb,jiba->ia', ket1a.conj(), bra2ab)

    dooa = -numpy.einsum('ia,ka->ik', bra1a.conj(), ket1a)
    doob = -numpy.einsum('ia,ka->ik', bra1b.conj(), ket1b)
    dooa -= numpy.einsum('ijab,ikab->jk', bra2aa.conj(), ket2aa) * .5
    dooa -= numpy.einsum('jiab,kiab->jk', bra2ab.conj(), ket2ab)
    doob -= numpy.einsum('ijab,ikab->jk', bra2bb.conj(), ket2bb) * .5
    doob -= numpy.einsum('ijab,ikab->jk', bra2ab.conj(), ket2ab)

    dvva = numpy.einsum('ia,ic->ac', ket1a, bra1a.conj())
    dvvb = numpy.einsum('ia,ic->ac', ket1b, bra1b.conj())
    dvva += numpy.einsum('ijab,ijac->bc', ket2aa, bra2aa.conj()) * .5
    dvva += numpy.einsum('ijba,ijca->bc', ket2ab, bra2ab.conj())
    dvvb += numpy.einsum('ijba,ijca->bc', ket2bb, bra2bb.conj()) * .5
    dvvb += numpy.einsum('ijab,ijac->bc', ket2ab, bra2ab.conj())

    dm1a = numpy.empty((nmoa, nmoa), dtype=dooa.dtype)
    dm1a[:nocca, :nocca] = dooa
    dm1a[:nocca, nocca:] = dova
    dm1a[nocca:, :nocca] = dvoa
    dm1a[nocca:, nocca:] = dvva
    norm = numpy.dot(cibra, ciket)
    dm1a[numpy.diag_indices(nocca)] += norm

    dm1b = numpy.empty((nmob, nmob), dtype=dooa.dtype)
    dm1b[:noccb, :noccb] = doob
    dm1b[:noccb, noccb:] = dovb
    dm1b[noccb:, :noccb] = dvob
    dm1b[noccb:, noccb:] = dvvb
    dm1b[numpy.diag_indices(noccb)] += norm

    if myci.frozen is not None:
        nmoa = myci.mo_occ[0].size
        nmob = myci.mo_occ[1].size
        nocca = numpy.count_nonzero(myci.mo_occ[0] > 0)
        noccb = numpy.count_nonzero(myci.mo_occ[1] > 0)
        rdm1a = numpy.zeros((nmoa, nmoa), dtype=dm1a.dtype)
        rdm1b = numpy.zeros((nmob, nmob), dtype=dm1b.dtype)
        rdm1a[numpy.diag_indices(nocca)] = norm
        rdm1b[numpy.diag_indices(noccb)] = norm
        moidx = myci.get_frozen_mask()
        moidxa = numpy.where(moidx[0])[0]
        moidxb = numpy.where(moidx[1])[0]
        rdm1a[moidxa[:, None], moidxa] = dm1a
        rdm1b[moidxb[:, None], moidxb] = dm1b
        dm1a = rdm1a
        dm1b = rdm1b
    return dm1a, dm1b
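The frozen-orbital branch at the end combines two indexing patterns: seeding part of the diagonal with numpy.diag_indices, then scattering a smaller block into the larger matrix with outer indexing. A hedged standalone sketch (embed_block and its arguments are illustrative, not pyscf API):

import numpy

def embed_block(small, big_size, active_idx, n_diag, diag_val):
    # place `small` at the rows/columns listed in `active_idx` (a 1-D integer array)
    # of a larger zero matrix whose first `n_diag` diagonal entries hold `diag_val`
    big = numpy.zeros((big_size, big_size), dtype=small.dtype)
    big[numpy.diag_indices(n_diag)] = diag_val
    big[active_idx[:, None], active_idx] = small
    return big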
Beispiel #58
0
    def _to_standard_index(data_array, desired_shape, is_multi_var=False):
        """
        Transform swath data to a standard format where data runs along
        diagonal of ND matrix and the non-diagonal data points are
        masked

        :param data_array: The data array to be transformed
        :param desired_shape: The desired shape of the resulting array
        :param is_multi_var: True if this is a multi-variable tile
        :type data_array: np.array
        :type desired_shape: tuple
        :type is_multi_var: bool
        :return: Reshaped array
        :rtype: np.array
        """

        if desired_shape[0] == 1:
            reshaped_array = np.ma.masked_all(
                (desired_shape[1], desired_shape[2]))
            row, col = np.indices(data_array.shape)

            reshaped_array[np.diag_indices(
                desired_shape[1],
                len(reshaped_array.shape))] = data_array[row.flat, col.flat]
            reshaped_array.mask[np.diag_indices(
                desired_shape[1],
                len(reshaped_array.shape))] = data_array.mask[row.flat,
                                                              col.flat]
            reshaped_array = reshaped_array[np.newaxis, :]
        elif is_multi_var:
            # Break the array up by variable. Translate shape from
            # len(times) x len(latitudes) x len(longitudes) x num_vars,
            # to
            # num_vars x len(times) x len(latitudes) x len(longitudes)
            reshaped_data_array = np.moveaxis(data_array, -1, 0)
            reshaped_array = []

            for variable_data_array in reshaped_data_array:
                variable_reshaped_array = np.ma.masked_all(desired_shape)
                row, col = np.indices(variable_data_array.shape)

                variable_reshaped_array[np.diag_indices(
                    desired_shape[1],
                    len(variable_reshaped_array.shape))] = variable_data_array[
                        row.flat, col.flat]
                variable_reshaped_array.mask[np.diag_indices(
                    desired_shape[1], len(variable_reshaped_array.shape)
                )] = variable_data_array.mask[row.flat, col.flat]
                reshaped_array.append(variable_reshaped_array)
        else:
            reshaped_array = np.ma.masked_all(desired_shape)
            row, col = np.indices(data_array.shape)

            reshaped_array[np.diag_indices(
                desired_shape[1],
                len(reshaped_array.shape))] = data_array[row.flat, col.flat]
            reshaped_array.mask[np.diag_indices(
                desired_shape[1],
                len(reshaped_array.shape))] = data_array.mask[row.flat,
                                                              col.flat]

        return reshaped_array
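A reduced sketch of the diagonal-masking idea used in all three branches above: a 1-D strip of values is placed on the diagonal of a masked square array while every other cell stays masked (shapes are illustrative, and `values` is assumed to have at least `size` entries):

import numpy as np

def strip_to_diagonal(values, size):
    # masked (size, size) array whose diagonal holds `values`; off-diagonal stays masked
    out = np.ma.masked_all((size, size))
    out[np.diag_indices(size)] = values[:size]
    return out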
Beispiel #59
0
    def transition_features(data):
        S = np.zeros(data.shape)
        for i in range(5):
            S[:, i] = np.convolve(data[:, i], np.ones(9), mode='same')

        S = softmax(S)

        cumR = np.zeros(S.shape)
        Th = 0.2
        peakTh = 10
        for j in range(5):
            for i in range(len(S)):
                if S[i - 1, j] > Th:
                    cumR[i, j] = cumR[i - 1, j] + S[i - 1, j]

            cumR[cumR[:, j] < peakTh, j] = 0

        for i in range(5):
            d = cumR[:, i]
            indP = HypnodensityFeatures.find_peaks(cumR[:, i])
            typeP = np.ones(len(indP)) * i
            if i == 0:
                peaks = np.concatenate([np.expand_dims(indP, axis=1), np.expand_dims(typeP, axis=1)], axis=1)
            else:
                peaks = np.concatenate([peaks, np.concatenate([np.expand_dims(indP, axis=1),
                                                               np.expand_dims(typeP, axis=1)], axis=1)], axis=0)

        I = [i[0] for i in sorted(enumerate(peaks[:, 0]), key=lambda x: x[1])]
        peaks = peaks[I, :]

        remList = np.zeros(len(peaks))

        peaks[peaks[:, 1] == 0, 1] = 1
        peaks[:, 1] = peaks[:, 1] - 1

        if peaks.shape[0] < 2:
            features = np.zeros(17)
            return features

        for i in range(peaks.shape[0] - 1):
            if peaks[i, 1] == peaks[i + 1, 1]:
                peaks[i + 1, 0] += peaks[i, 0]
                remList[i] = 1
        remList = remList == 0
        peaks = peaks[remList, :]
        transitions = np.zeros([4, 4])

        for i in range(peaks.shape[0] - 1):
            transitions[int(peaks[i, 1]), int(peaks[i + 1, 1])] = np.sqrt(peaks[i, 0] * peaks[i + 1, 0])
        di = np.diag_indices(4)
        transitions[di] = None

        transitions = transitions.reshape(-1)
        transitions = transitions[np.invert(np.isnan(transitions))]
        nPeaks = np.zeros(5)
        for i in range(4):
            nPeaks[i] = np.sum(peaks[:, 1] == i)

        nPeaks[-1] = peaks.shape[0]

        features = np.concatenate([transitions, nPeaks], axis=0)
        return features
Beispiel #60
0
    def fn(x, L_acc, LT_acc):
        # unpack the flat vector x into a lower-triangular matrix and exponentiate
        # its diagonal so that the product x_.dot(x_.T) is positive definite
        x_ = K.zeros((self.nb_actions, self.nb_actions))
        x_ = T.set_subtensor(x_[np.tril_indices(self.nb_actions)], x)
        diag = K.exp(T.diag(x_))
        x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], diag)
        return x_, x_.T
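For intuition, the same lower-triangular construction in plain NumPy; the Keras/Theano code above builds it symbolically, one sample at a time:

import numpy as np

def build_cholesky_factor(flat, n):
    # unpack `flat` (length n*(n+1)//2) into a lower-triangular matrix and
    # exponentiate its diagonal, so that L.dot(L.T) is positive definite
    L = np.zeros((n, n))
    L[np.tril_indices(n)] = flat
    L[np.diag_indices(n)] = np.exp(L[np.diag_indices(n)])
    return L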