Example #1
import numpy as np

def sum1(M):
    # Kahan summation along axis 1: x[i] is the compensated sum of row i
    # of the 2-D array M (ksum is an external helper, sketched below)
    n, m = M.shape
    x = np.zeros(n)
    for i in range(n):
        x[i] = ksum(np.reshape(M[i, :], (1, m)))
    return x
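The ksum helper is not defined in this example. A minimal sketch of what it plausibly does, assuming it performs Kahan (compensated) summation over all entries of a 2-D array and returns a float; the name and signature are inferred from the call site above:

import numpy as np

def ksum(M):
    # Assumed stand-in for the undefined ksum: Kahan compensated
    # summation over every entry of the 2-D array M.
    s = 0.0  # running sum
    c = 0.0  # compensation for low-order bits lost in earlier additions
    for v in M.ravel():
        y = v - c        # apply the correction to the next addend
        t = s + y        # add; low-order bits of y may be lost here
        c = (t - s) - y  # recover exactly what was lost
        s = t
    return s

With this definition, sum1(M) returns the compensated sum of each row of M.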
Example #2
    def function(self, x):
        self.count += 1
        if not np.all(x >= self.lower_bounds):
            raise_or_warn(
                'Optimizer violated the lower bounds for rate matrix elements.',
                self.on_error)

        # compute function
        K = np.zeros((self.N, self.N))
        K[self.I, self.J] = x / self.pi[self.I]
        K[self.J, self.I] = x / self.pi[self.J]
        np.fill_diagonal(K, -sum1(K))
        T = sp.linalg.expm(self.dt * K)  # use eigendecomposition instead?
        T[self.zero] = 1.0  # set unused elements to a dummy value to avoid division by 0
        # check T!=0 for C!=0
        nonzero_C = np.where(self.C != 0)
        if np.any(np.abs(T[nonzero_C]) <= 1E-20):
            warnings.warn(
                'During iteration T_ij became very small while C(tau)_ij > 0.',
                NotConnectedWarning)
        f = ksum(self.C * np.log(T))  # log-likelihood: sum_ij C_ij * log T_ij

        if self.verbose:
            logging.info('iteration=%d, log L=%f' % (self.count, f))
        return -f
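The construction of K above encodes reversibility: x holds one parameter per index pair in (self.I, self.J), and dividing by pi[I] and pi[J] makes pi_i K_ij = pi_j K_ji = x_ij (detailed balance), while fill_diagonal sets each diagonal element to minus its Kahan row sum so that K is a proper rate matrix. A self-contained sketch of the same construction; N, I, J, pi, and x here are made-up stand-ins for the attributes used above:

import numpy as np

N = 3
I, J = np.triu_indices(N, k=1)       # index pairs with i < j
pi = np.array([0.5, 0.3, 0.2])       # stationary distribution
x = np.array([0.06, 0.02, 0.03])     # parameters x_ij = pi_i * K_ij

K = np.zeros((N, N))
K[I, J] = x / pi[I]                  # K_ij = x_ij / pi_i
K[J, I] = x / pi[J]                  # K_ji = x_ij / pi_j
np.fill_diagonal(K, -K.sum(axis=1))  # rows of a rate matrix sum to zero

assert np.allclose(K.sum(axis=1), 0.0)
assert np.allclose(pi[:, None] * K, (pi[:, None] * K).T)  # detailed balance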
Example #3
    def function(self, x):
        self.count += 1
        if self.sparsity is None:
            assert np.all(x >= 0)
        else:
            assert np.all(x > 0)

        # compute function
        K = np.zeros((self.N, self.N))
        K[self.I, self.J] = x / self.pi[self.I]
        K[self.J, self.I] = x / self.pi[self.J]
        np.fill_diagonal(K, -sum1(K))
        T = sp.linalg.expm(self.dt * K)  # use eigendecomposition instead?
        T[self.zero] = 1.0  # set unused elements to a dummy value to avoid division by 0
        # check T!=0 for C!=0
        nonzero_C = np.where(self.C != 0)
        if np.any(np.abs(T[nonzero_C]) <= 1E-15):
            warnings.warn(
                'During iteration T_ij became very small while C(tau)_ij > 0.',
                NotConnectedWarning)
        f = ksum(self.C * np.log(T))  # log-likelihood: sum_ij C_ij * log T_ij

        if self.verbose:
            logging.info('iteration=%d, log L=%f' % (self.count, f))
        return -f
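Both likelihood functions compute T = expm(dt * K) directly, and the inline comment asks whether an eigendecomposition would do instead. Because K satisfies detailed balance it is similar to a symmetric matrix, so the exponential can be taken elementwise on real eigenvalues. A sketch of that alternative using only NumPy; this is not the eigen_decomposition helper used in Example #4, just the underlying idea:

import numpy as np

def expm_reversible(K, pi, dt):
    # S = diag(sqrt(pi)) K diag(1/sqrt(pi)) is symmetric for a reversible K,
    # so it has a real eigendecomposition S = U diag(lam) U^T.
    sqrt_pi = np.sqrt(pi)
    S = sqrt_pi[:, None] * K / sqrt_pi[None, :]
    lam, U = np.linalg.eigh(0.5 * (S + S.T))  # symmetrize against round-off
    A = U / sqrt_pi[:, None]                  # A    = diag(1/sqrt(pi)) U
    Ainv = U.T * sqrt_pi[None, :]             # Ainv = U^T diag(sqrt(pi))
    return (A * np.exp(dt * lam)).dot(Ainv)   # A diag(exp(dt*lam)) Ainv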
Example #4
    def function_and_gradient(self, x):
        if not np.all(x >= self.lower_bounds):
            raise_or_warn(
                'Optimizer violated the lower bounds for rate matrix elements.',
                self.on_error)

        # compute function
        K = np.zeros((self.N, self.N))
        K[self.I, self.J] = x / self.pi[self.I]
        K[self.J, self.I] = x / self.pi[self.J]
        np.fill_diagonal(K, -sum1(K))

        # compute eigendecomposition
        lam, A, Ainv = eigen_decomposition(K, self.pi)

        # T = kdot(kdot(A,np.diag(np.exp(self.tau*lam))),Ainv)
        T = sp.linalg.expm(self.dt * K)
        T[self.zero_C] = 1.0  # set unused elements to a dummy value to avoid division by 0
        # check T!=0 for C!=0
        if np.any(np.abs(T[self.nonzero_C]) <= 1E-20):
            warnings.warn(
                'During iteration T_ij became very small while C(tau)_ij > 0. Regularizing T.',
                NotConnectedWarning)
            for i, j in zip(*self.nonzero_C):
                if T[i, j] <= 1E-20:
                    T[i, j] = 1E-20

        f = ksum(self.C * np.log(T))  # log-likelihood: sum_ij C_ij * log T_ij

        if self.verbose:
            logging.info('iteration=%d, log L=%f' % (self.count, f))
        self.count += 1

        V = getV(lam, self.dt)

        # M = Ainv.dot(Ctau.T/T.T).dot(A)*V.T
        M = kdot(kdot(Ainv, np.ascontiguousarray(self.C.T / T.T)),
                 A) * np.ascontiguousarray(V.T)
        # H = A.dot(M).dot(Ainv)
        H = kdot(kdot(A, M), Ainv)

        grad = np.zeros(len(x))
        for i in range(len(x)):
            Di = self.D[i]
            grad[i] = vdot(H[Di.col, Di.row], Di.data)  # this is (H.T*Di).sum()

        return (-f, -grad)
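The getV helper is not shown either. In gradients of this kind, V is typically the matrix of divided differences of the exponential over the eigenvalues, which is what turns the eigenbasis product above into the derivative of expm(dt * K) with respect to the rate matrix elements. A hedged sketch of that standard construction; the real getV may differ in details:

import numpy as np

def getV(lam, dt):
    # Divided differences of t -> exp(dt * t) over the eigenvalues:
    #   V[a, b] = (exp(dt*lam[a]) - exp(dt*lam[b])) / (lam[a] - lam[b])  for a != b
    #   V[a, a] = dt * exp(dt*lam[a])                                    (the limit)
    n = len(lam)
    e = np.exp(dt * lam)
    V = np.empty((n, n))
    for a in range(n):
        for b in range(n):
            if np.isclose(lam[a], lam[b]):
                V[a, b] = dt * e[a]
            else:
                V[a, b] = (e[a] - e[b]) / (lam[a] - lam[b])
    return V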