Example #1
def make_F_matrix(E_matrix):
    """takes an E matrix and returns an F matrix

    input is output of make_E_matrix

    for each element in matrix subtract mean of corresponding row and 
    column and add the mean of all elements in the matrix
    """
    num_rows, num_cols = shape(E_matrix)
    #make a vector of the means for each row and column
    #column_means = (add.reduce(E_matrix) / num_rows)
    column_means = (add.reduce(E_matrix) / num_rows)[:, newaxis]
    trans_matrix = transpose(E_matrix)
    row_sums = add.reduce(trans_matrix)
    row_means = row_sums / num_cols
    #calculate the mean of the whole matrix
    matrix_mean = sum(row_sums) / (num_rows * num_cols)
    #adjust each element in the E matrix to make the F matrix

    E_matrix -= row_means
    E_matrix -= column_means
    E_matrix += matrix_mean

    #for i, row in enumerate(E_matrix):
    #    for j, val in enumerate(row):
    #        E_matrix[i,j] = E_matrix[i,j] - row_means[i] - \
    #                column_means[j] + matrix_mean
    return E_matrix
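A quick sanity check of the double-centering (an illustrative sketch, not from the source project; it assumes the numpy names used above, such as shape, add, newaxis and transpose, are star-imported where make_F_matrix lives): for a square, symmetric E matrix, every row and column of the resulting F matrix sums to zero.

from numpy import add, allclose, array

E = array([[0.0, 1.0, 4.0],
           [1.0, 0.0, 2.0],
           [4.0, 2.0, 0.0]])
F = make_F_matrix(E.copy())             # copy: the function modifies E in place
assert allclose(add.reduce(F, 0), 0.0)  # column sums vanish
assert allclose(add.reduce(F, 1), 0.0)  # row sums vanish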
Example #2
def _basic_simps(y,start,stop,x,dx,axis):
    nd = len(y.shape)
    if start is None:
        start = 0
    step = 2
    all = (slice(None),)*nd
    slice0 = tupleset(all, axis, slice(start, stop, step))
    slice1 = tupleset(all, axis, slice(start+1, stop+1, step))
    slice2 = tupleset(all, axis, slice(start+2, stop+2, step))

    if x is None:  # Even spaced Simpson's rule.
        result = add.reduce(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
                                    axis)
    else:
        # Account for possibly different spacings.
        #    Simpson's rule changes a bit.
        h = diff(x,axis=axis)
        sl0 = tupleset(all, axis, slice(start, stop, step))
        sl1 = tupleset(all, axis, slice(start+1, stop+1, step))
        h0 = h[sl0]
        h1 = h[sl1]
        hsum = h0 + h1
        hprod = h0 * h1
        h0divh1 = h0 / h1
        result = add.reduce(hsum/6.0*(y[slice0]*(2-1.0/h0divh1) +
                                              y[slice1]*hsum*hsum/hprod +
                                              y[slice2]*(2-h0divh1)),axis)
    return result
Example #3
 def test_reduce_keepdims(self):
     from numpy import add, arange
     a = arange(12).reshape(3, 4)
     b = add.reduce(a, 0, keepdims=True)
     assert b.shape == (1, 4)
     assert (add.reduce(a, 0, keepdims=True) == [12, 15, 18, 21]).all()
     assert (add.reduce(a, 0, None, None, True) == [12, 15, 18, 21]).all()
Example #4
def _basic_simps(y,start,stop,x,dx,axis):
    nd = len(y.shape)
    if start is None:
        start = 0
    step = 2
    all = (slice(None),)*nd
    slice0 = tupleset(all, axis, slice(start, stop, step))
    slice1 = tupleset(all, axis, slice(start+1, stop+1, step))
    slice2 = tupleset(all, axis, slice(start+2, stop+2, step))

    if x is None:  # Even spaced Simpson's rule.
        result = add.reduce(dx/3.0* (y[slice0]+4*y[slice1]+y[slice2]),
                                    axis)
    else:
        # Account for possibly different spacings.
        #    Simpson's rule changes a bit.
        h = diff(x,axis=axis)
        sl0 = tupleset(all, axis, slice(start, stop, step))
        sl1 = tupleset(all, axis, slice(start+1, stop+1, step))
        h0 = h[sl0]
        h1 = h[sl1]
        hsum = h0 + h1
        hprod = h0 * h1
        h0divh1 = h0 / h1
        result = add.reduce(hsum/6.0*(y[slice0]*(2-1.0/h0divh1) + \
                                              y[slice1]*hsum*hsum/hprod + \
                                              y[slice2]*(2-h0divh1)),axis)
    return result
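A self-contained check of the even-spacing branch (a sketch; tupleset is recreated here the way scipy defines it, and add is assumed star-imported from numpy where _basic_simps lives). Simpson's rule is exact for quadratics, so five samples of x**2 on [0, 1] reproduce the integral 1/3.

from numpy import allclose, arange

def tupleset(t, i, value):
    lst = list(t)
    lst[i] = value
    return tuple(lst)

x = arange(5) * 0.25                 # five samples on [0, 1], dx = 0.25
y = x ** 2
approx = _basic_simps(y, 0, len(y) - 2, None, 0.25, -1)
assert allclose(approx, 1.0 / 3.0)   # Simpson's rule is exact here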
Example #5
def make_F_matrix(E_matrix):
    """takes an E matrix and returns an F matrix

    input is output of make_E_matrix

    for each element in matrix subtract mean of corresponding row and 
    column and add the mean of all elements in the matrix
    """
    num_rows, num_cols = shape(E_matrix)
    #make a vector of the means for each row and column
    #column_means = (add.reduce(E_matrix) / num_rows)
    column_means = (add.reduce(E_matrix) / num_rows)[:,newaxis]
    trans_matrix = transpose(E_matrix)
    row_sums = add.reduce(trans_matrix)
    row_means = row_sums / num_cols
    #calculate the mean of the whole matrix
    matrix_mean = sum(row_sums) / (num_rows * num_cols)
    #adjust each element in the E matrix to make the F matrix

    E_matrix -= row_means
    E_matrix -= column_means
    E_matrix += matrix_mean

    #for i, row in enumerate(E_matrix):
    #    for j, val in enumerate(row):
    #        E_matrix[i,j] = E_matrix[i,j] - row_means[i] - \
    #                column_means[j] + matrix_mean
    return E_matrix
Example #6
    def _1covariate_loop(self, lmls, effs, scales, yTBM, XTBM, MTBM):
        ETBE = self._ETBE
        yTBX = self._yTBX
        XTBX = [i.XTBX for i in ETBE]
        yTBy = self._yTBy

        A00 = add.reduce([i.XTBX[0, 0] for i in ETBE])
        A01 = add.reduce([i[0, :] for i in XTBM])
        A11 = add.reduce([i for i in MTBM])

        b0 = add.reduce([i[0] for i in yTBX])
        b1 = add.reduce([i for i in yTBM])

        x = hsolve(A00, A01, A11, b0, b1)
        beta = x[0][newaxis, :]
        alpha = x[1]
        bstar = _bstar_1effect(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM)

        scales[:] = bstar / self._nsamples
        lmls -= self._nsamples * safe_log(scales)
        lmls /= 2
        effs["eff0"][:] = beta.T
        effs["eff1"][:] = alpha

        def jinv(A):
            from numpy import eye
            from numpy.linalg import inv

            A = asarray(A, float)
            return inv(A + eye(A.shape[0]) * 1e-7)

        A00i, _, A11i = hinv(A00, A01, A11)
        effs["eff0_se"][:, 0] = sqrt(scales * A00i)
        effs["eff1_se"][:] = sqrt(scales * A11i)
Example #7
    def test_reduceND(self):
        from numpy import add, arange

        a = arange(12).reshape(3, 4)
        assert (add.reduce(a, 0) == [12, 15, 18, 21]).all()
        assert (add.reduce(a, 1) == [6.0, 22.0, 38.0]).all()
        raises(ValueError, add.reduce, a, 2)
Example #8
    def _multicovariate_loop(self, lmls, effs, scales, yTBM, XTBM, MTBM):
        ETBE = self._ETBE
        yTBE = self._yTBE
        tuple_size = len(yTBE)

        for i in range(XTBM[0].shape[1]):

            for j in range(tuple_size):
                yTBE[j].set_yTBM(yTBM[j][i])
                ETBE[j].set_XTBM(XTBM[j][:, [i]])
                ETBE[j].set_MTBM(MTBM[j][i])

            left = add.reduce([j.value for j in ETBE])
            right = add.reduce([j.value for j in yTBE])
            x = rsolve(left, right)
            beta = x[:-1][:, newaxis]
            alpha = x[-1:]
            bstar = _bstar_unpack(beta, alpha, self._yTBy, yTBE, ETBE,
                                  _bstar_1effect)

            se = sqrt(pinv(left).diagonal())
            scales[i] = bstar / self._nsamples
            lmls[i] -= self._nsamples * safe_log(scales[i])
            effs["eff0"][i, :] = beta.T
            effs["eff0_se"][i, :] = se[:-1]
            effs["eff1"][i] = alpha[0]
            effs["eff1_se"][i] = se[-1]

        lmls /= 2
Example #9
 def checkHMM(self):
     """This function will asserts if the internal state of the class
     is inconsistent. (Checks that the matrices' sizes are correct and
     that they represents probabilities)."""
     assert self.A.shape == (self.N, self.N), \
            """transition_proba must be a N*N matrix, where N is 
             len(state_list)"""
     assert self.B.shape == (self.M, self.N), \
            """transition_proba must be a M*N matrix, where N is 
             len(state_list) and M is len(observation_list)"""
     assert self.pi.shape == (self.N, ), \
            """transition_proba must be a N element vector,
            where N is len(state_list)"""
     reduced = add.reduce(self.A, 1) - 1
     assert (alltrue(reduced < EPSILON) and \
             alltrue(reduced > -EPSILON) and \
             alltrue(alltrue(self.A<=1.0)) and \
             alltrue(alltrue(self.A>=0.0))),\
             """transition_proba must be a probability matrix"""
     reduced = add.reduce(self.B, 0) - 1
     assert (alltrue(reduced < EPSILON) and \
             alltrue(reduced > -EPSILON) and \
             alltrue(alltrue(self.B<=1.0)) and \
             alltrue(alltrue(self.B>=0.0))),\
             """each column of observation_proba must be a probability
             vector"""
     if len(self.pi)==0: # a zero length vector is reduced to a scalar
         return          # and makes the following test fail
     reduced = add.reduce(self.pi) - 1
     assert (reduced < EPSILON and reduced > -EPSILON and \
             alltrue(self.pi<=1.0) and \
             alltrue(self.pi>=0.0)), \
             """initial_state_proba must be a probability vector"""
Example #10
def _bstar_set(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM):
    """
    Compute -2𝐲ᵀBEⱼ𝐛ⱼ + (𝐛ⱼEⱼ)ᵀBEⱼ𝐛ⱼ.

    For 𝐛ⱼ = [𝜷ⱼᵀ 𝜶ⱼᵀ]ᵀ.
    """
    from numpy_sugar import epsilon

    r = yTBy
    r -= 2 * add.reduce([i @ beta for i in yTBX])
    r -= 2 * add.reduce([i @ alpha for i in yTBM])
    r += add.reduce([beta.T @ i @ beta for i in XTBX])
    r += 2 * add.reduce([beta.T @ i @ alpha for i in XTBM])
    r += add.reduce([alpha.T @ i @ alpha for i in MTBM])
    return clip(r, epsilon.tiny, inf)
Example #11
 def _learn_A(self, states):
     """Train the transition probability matrix according to one 
     sequence of states"""
     T = len(states)
     N = self.N
     self.A = zeros((N, N))
     for k in range(T - 1):
         Si = states[k]
         Sj = states[k + 1]
         i = self.X_index[Si]
         j = self.X_index[Sj]
         self.A[i, j] += 1
     for i in range(N):
         if add.reduce(self.A, 1)[i] == 0:
             self.A[i, i] = 1
     self.A *= 1. / add.reduce(self.A, 1)[:, newaxis]
Example #12
def _correct_M( M, k, p ):
    """This function is a hack. It looks for states with 0 probabilities, and
    changes this probability to a uniform probability. This avoids divide by zero
    errors, and doesn't change the result of the algorithm.
    You can only have 0 probabilities if your observation matrix contains symbols
    that don't appear in your observations AND the initial state transition and
    observation probabilities are such that a state is reachable only if you observe
    those symbols.
    Parameters are:
    M the matrix
    k the axis along which we need a pdf
    p the value to replace with (usually 1/M.shape[k])
    """
    D = equal( add.reduce( M, k ), 0.0)
    if k == 1:
        for i in range(M.shape[0]):
            if D[i]:
                M[i, :] = p
    elif k == 0:
        for i in range(M.shape[1]):
            if D[i]:
                M[:, i] = p
    else:
        raise "Not Implemented"
    return M
Example #13
def gaussian_convolution(data, ijk_linewidths):

    from numpy import float32, zeros, add, divide, outer, reshape
    if data.dtype.type != float32:
        data = data.astype(float32)

    from math import exp
    gaussians = []
    for a in range(3):
        size = data.shape[a]
        gaussian = zeros((size, ), float32)
        hw = ijk_linewidths[2 - a] / 2.0
        for i in range(size):
            u = min(i, size - i) / hw
            p = min(u * u / 2, 100)  # avoid OverflowError with exp()
            gaussian[i] = exp(-p)
        area = add.reduce(gaussian)
        divide(gaussian, area, gaussian)
        gaussians.append(gaussian)

    g01 = outer(gaussians[0], gaussians[1])
    g012 = outer(g01, gaussians[2])
    g012 = reshape(g012, data.shape)

    cdata = zeros(data.shape, float32)

    from numpy.fft import fftn, ifftn
    # TODO: Fourier transform Gaussian analytically to reduce computation time
    #       about 30% (one of three fft calculations).
    ftg = fftn(g012)
    ftd = fftn(data)
    gd = ifftn(ftg * ftd)
    gd = gd.astype(float32)
    return gd
Example #14
def _compute_qth_percentile(sorted, q, axis, out):
    if not isscalar(q):
        p = [_compute_qth_percentile(sorted, qi, axis, None) for qi in q]

        if out is not None:
            out.flat = p

        return p

    q = q / 100.0
    if (q < 0) or (q > 1):
        raise ValueError("percentile must be either in the range [0,100]")

    indexer = [slice(None)] * sorted.ndim
    Nx = sorted.shape[axis]
    index = q * (Nx - 1)
    i = int(index)
    if i == index:
        indexer[axis] = slice(i, i + 1)
        weights = array(1)
        sumval = 1.0
    else:
        indexer[axis] = slice(i, i + 2)
        j = i + 1
        weights = array([(j - index), (index - i)], float)
        wshape = [1] * sorted.ndim
        wshape[axis] = 2
        weights.shape = wshape
        sumval = weights.sum()

    # Use add.reduce in both cases to coerce data type as well as
    #   check and use out array.
    return add.reduce(sorted[tuple(indexer)] * weights, axis=axis, out=out) / sumval
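The interpolation step in isolation (an illustrative sketch, not part of the original function): the q-th percentile of a sorted vector is a weighted average of its two bracketing samples, and add.reduce performs the weighted sum.

from numpy import add, array

s = array([1.0, 2.0, 3.0, 4.0])      # already sorted
q = 0.25                             # quartile, as a fraction
index = q * (len(s) - 1)             # 0.75 -> between s[0] and s[1]
i = int(index)
weights = array([i + 1 - index, index - i])
assert add.reduce(s[i:i + 2] * weights) == 1.75   # matches numpy.percentile(s, 25)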
Example #15
 def _likelihood( self, scale_factors ):
     """This function computes the log likelihood
     of the training set using the precomputed
     alpha probabilities (sum(k=0..N, alpha(T,k))).
     It should increase during the learning process."""
     t = where( scale_factors==0.0, SMALLESTFLOAT, scale_factors )
     return -add.reduce( log(t) )
Example #16
    def test_reduce(self):
        from numpy import add, maximum

        assert add.reduce([1, 2, 3]) == 6
        assert maximum.reduce([1]) == 1
        assert maximum.reduce([1, 2, 3]) == 3
        raises(ValueError, maximum.reduce, [])
Example #17
    def plane(self, matrix):

        from numpy import ravel, minimum, maximum, add, multiply, array, float32
        matrix_1d = matrix.ravel()
        dmin = minimum.reduce(matrix_1d)
        if self.min is None or dmin < self.min:
            self.min = dmin
        dmax = maximum.reduce(matrix_1d)
        if self.max is None or dmax > self.max:
            self.max = dmax
        self.sum += add.reduce(matrix_1d)
        # TODO: Don't copy array to get standard deviation.
        # Avoid overflow when squaring integral types
        m2 = array(matrix_1d, float32)
        multiply(m2, m2, m2)
        self.sum2 += add.reduce(m2)
Example #18
    def plane(self, matrix):

        from numpy import ravel, minimum, maximum, add, multiply, array, float32
        matrix_1d = matrix.ravel()
        dmin = minimum.reduce(matrix_1d)
        if self.min is None or dmin < self.min:
            self.min = dmin
        dmax = maximum.reduce(matrix_1d)
        if self.max is None or dmax > self.max:
            self.max = dmax
        self.sum += add.reduce(matrix_1d)
        # TODO: Don't copy array to get standard deviation.
        # Avoid overflow when squaring integral types
        m2 = array(matrix_1d, float32)
        multiply(m2, m2, m2)
        self.sum2 += add.reduce(m2)
Example #19
def gaussian_convolution(data, ijk_linewidths):

  from numpy import float32, zeros, add, divide, outer, reshape
  if data.dtype.type != float32:
    data = data.astype(float32)

  from math import exp
  gaussians = []
  for a in range(3):
    size = data.shape[a]
    gaussian = zeros((size,), float32)
    hw = ijk_linewidths[2-a] / 2.0
    for i in range(size):
      u = min(i,size-i) / hw
      p = min(u*u/2, 100)               # avoid OverflowError with exp()
      gaussian[i] = exp(-p)
    area = add.reduce(gaussian)
    divide(gaussian, area, gaussian)
    gaussians.append(gaussian)

  g01 = outer(gaussians[0], gaussians[1])
  g012 = outer(g01, gaussians[2])
  g012 = reshape(g012, data.shape)
  
  cdata = zeros(data.shape, float32)

  from numpy.fft import fftn, ifftn
  # TODO: Fourier transform Gaussian analytically to reduce computation time
  #       about 30% (one of three fft calculations).
  ftg = fftn(g012)
  ftd = fftn(data)
  gd = ifftn(ftg * ftd)
  gd = gd.astype(float32)
  return gd
Example #20
    def derivative_reduce(self, derivatives):
        r"""Sum of mean function derivatives.

        Returns:
            :math:`f_0' + f_1' + \dots`
        """
        return add.reduce(derivatives)
Example #21
    def get_stacking(self, rvec):
        """
        Returns dictionary with one of the types
        (<<, >>, <>, ><) for the two residues.
        Or None, if they are not stacked.
        """
        distance, nn_ang, n1cc_ang, n2cc_ang = self.calc_angles(rvec)
        if distance and (n1cc_ang < 40 or n1cc_ang > 140 \
                          or n2cc_ang < 40 or n2cc_ang > 140):
            # find out whether the normals are opposed or straight
            # (pointing in the same direction).
            if nn_ang < 30:
                straight = True
            elif nn_ang > 150:
                straight = False
            else:
                return None  # invalid normal angle
            # find out whether base2 is on top of base1
            # calculate whether the normal on base1 brings one closer to base2
            n1c2 = rvec.center - self.center - self.normal
            n1c2dist = sqrt(add.reduce(n1c2 * n1c2))  # vector length
            is_up = n1c2dist < distance

            stacktype = STACKINGS[(straight, is_up)]
            return StackingInteraction(self.residue, \
                rvec.residue, stacktype)
Example #22
    def test_reduce_errors(self):
        from numpy import sin, add, maximum, zeros

        raises(ValueError, sin.reduce, [1, 2, 3])
        assert add.reduce(1) == 1

        assert list(maximum.reduce(zeros((2, 0)), axis=0)) == []
        exc = raises(ValueError, maximum.reduce, zeros((2, 0)), axis=None)
        assert exc.value[0] == ("zero-size array to reduction operation " "maximum which has no identity")
        exc = raises(ValueError, maximum.reduce, zeros((2, 0)), axis=1)
        assert exc.value[0] == ("zero-size array to reduction operation " "maximum which has no identity")

        a = zeros((2, 2)) + 1
        assert (add.reduce(a, axis=1) == [2, 2]).all()
        assert (add.reduce(a, axis=(1,)) == [2, 2]).all()
        exc = raises(ValueError, add.reduce, a, axis=2)
        assert exc.value[0] == "'axis' entry is out of bounds"
Example #23
 def _weighting_factor_Pk(self, observation):
     """compute Wk = P(Observation_k | lambda_k) """
     Tk = len(observation)
     obsIndices = self._get_observationIndices(observation)
     Bo = take(self.B, obsIndices, 0)
     alpha_s, scalingFactor = self.alpha_scaled(self.A, Bo, self.pi)
     alpha = alpha_s[Tk-1] / product(scalingFactor, 0)
     return add.reduce(alpha)
Example #24
    def value(self):
        """
        Sum of mean vectors, 𝐟₀ + 𝐟₁ + ….

        Returns
        -------
        𝐦 : ndarray
            𝐟₀ + 𝐟₁ + ….
        """
        return add.reduce([mean.value() for mean in self._means])
Example #25
 def getStats(self, histo):
     m = array(histo[1:])
     sum = 0.0
     sum2 = 0.0
     n = float(add.reduce(m))
     for j in range(len(m)):
         sum = sum + j * m[j]
         sum2 = sum2 + (j ** 2) * float(m[j])
      var = (sum2 - (sum ** 2.0) / n) / n
      return sum / n, sqrt(var)
Example #26
    def value(self):
        r"""
        Sum of covariance matrices.

        Returns
        -------
        K : ndarray
            K₀ + K₁ + ⋯
        """
        return add.reduce([cov.value() for cov in self._covariances])
Example #27
def sample_from_histogram(p, n_samples=1):
    """
    returns indices of bins drawn according to the histogram p

    @param p: histogram
    @type p: numpy.array
    @param n_samples: number of samples to generate
    @type n_samples: integer
    """
    
    from numpy import add, less, argsort, take, arange
    from numpy.random import random

    indices = argsort(p)
    indices = take(indices, arange(len(p) - 1, -1, -1))

    c = add.accumulate(take(p, indices)) / add.reduce(p)

    return indices[add.reduce(less.outer(c, random(n_samples)), 0)]
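A hedged usage sketch: draw a batch of samples and check that every returned bin index is valid.

from numpy import array

p = array([0.2, 0.5, 0.3])           # histogram weights
idx = sample_from_histogram(p, n_samples=1000)
assert idx.shape == (1000,)
assert ((idx >= 0) & (idx < len(p))).all()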
Example #28
    def test_reduce_errors(self):
        from numpy import sin, add, maximum, zeros

        raises(ValueError, sin.reduce, [1, 2, 3])
        assert add.reduce(1) == 1

        assert list(maximum.reduce(zeros((2, 0)), axis=0)) == []
        exc = raises(ValueError, maximum.reduce, zeros((2, 0)), axis=None)
        assert exc.value[0] == ('zero-size array to reduction operation '
                                'maximum which has no identity')
        exc = raises(ValueError, maximum.reduce, zeros((2, 0)), axis=1)
        assert exc.value[0] == ('zero-size array to reduction operation '
                                'maximum which has no identity')

        a = zeros((2, 2)) + 1
        assert (add.reduce(a, axis=1) == [2, 2]).all()
        assert (add.reduce(a, axis=(1,)) == [2, 2]).all()
        exc = raises(ValueError, add.reduce, a, axis=2)
        assert exc.value[0] == "'axis' entry is out of bounds"
Example #29
    def _multicovariate_set(self, yTBM, XTBM, MTBM):

        yTBE = [_yTBE(i, j.shape[0]) for (i, j) in zip(self._yTBX, yTBM)]
        for a, b in zip(yTBE, yTBM):
            a.set_yTBM(b)

        set_size = yTBM[0].shape[0]
        ETBE = [
            _ETBE(i, j, set_size) for (i, j) in zip(self._XTQDi, self._XTQ)
        ]

        for a, b, c in zip(ETBE, XTBM, MTBM):
            a.set_XTBM(b)
            a.set_MTBM(c)

        left = add.reduce([j.value for j in ETBE])
        right = add.reduce([j.value for j in yTBE])
        x = rsolve(left, right)

        beta = x[:-set_size]
        alpha = x[-set_size:]
        bstar = _bstar_unpack(beta, alpha, self._yTBy, yTBE, ETBE, _bstar_set)

        lml = self._static_lml()

        scale = bstar / self._nsamples
        lml -= self._nsamples * safe_log(scale)
        lml /= 2

        effsizes_se = sqrt(scale * pinv(left).diagonal())
        beta_se = effsizes_se[:-set_size]
        alpha_se = effsizes_se[-set_size:]

        return {
            "lml": lml,
            "effsizes0": beta,
            "effsizes0_se": beta_se,
            "effsizes1": alpha,
            "effsizes1_se": alpha_se,
            "scale": scale,
        }
Example #30
 def _final_step( self, gamma, ksy, obsIndices ):
     """Compute the new model, using gamma and ksi"""
     sigma_gamma_A = add.reduce(gamma[:-1])
     sigma_gamma_B = add.reduce(gamma)
     for i in range(len(sigma_gamma_B)):
         if sigma_gamma_B[i] < EPSILON:
             sigma_gamma_B[i] = 1
     for i in range(len(sigma_gamma_A)):
         if sigma_gamma_A[i] < EPSILON:
             sigma_gamma_A[i] = 1
     ## Compute new PI
     pi_bar = gamma[0]                       # (40a)
     ## Compute new A
     A_bar  = add.reduce(ksy)
     A_bar /= sigma_gamma_A[:, newaxis] # (40b)       
     ## Compute new B
     B_bar = zeros( (self.M, self.N), float )
      for i in range(len(obsIndices)):
         B_bar[obsIndices[i]] += gamma[i] 
     B_bar /= sigma_gamma_B
     return A_bar, B_bar, pi_bar
Example #31
def gaussian(sdev, size):

  from math import exp
  from numpy import empty, single as floatc, add, divide

  g = empty((size,), floatc)
  for i in range(size):
    u = min(i,size-i) / sdev
    p = min(u*u/2, 100)               # avoid OverflowError with exp()
    g[i] = exp(-p)
  area = add.reduce(g)
  divide(g, area, g)
  return g
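Usage sketch: because the kernel is divided in place by its own add.reduce sum, the returned array sums to one (within single-precision rounding).

from numpy import add

g = gaussian(2.0, 11)                # sdev of 2 samples, 11-point kernel
assert abs(add.reduce(g) - 1.0) < 1e-6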
Example #32
def _alpha_scaled(A, Bo, pi):
    """Internal method.
    Computes forward probability values, using a rescaling method.
    alpha_scaled[t,i]=Normalisation(P(O(1)...O(t),Q(t)=Si|model))
    Bo is the "slice" of the observation probability matrix corresponding
    to the observations (ie Bo=take(B,observation_indices)).
    For each t, c(t)=1./sum(alpha(t,i)), and C(t)=product(k=0..t,c(t))
    and alpha_scaled(t,i)=alpha(t,i)*C(t)
    The function returns: (alpha_scaled,C(t))
    """
    T = Bo.shape[0]
    N = A.shape[0]
    alpha_t = Bo[0] * pi                # (19)
    scaling_factors = zeros( T, float )
    scaling_factors[0] = 1./add.reduce(alpha_t)    
    alpha_scaled = zeros( (T, N), float)
    alpha_scaled[0] = alpha_t*scaling_factors[0]
    for i in range(1, T):
        alpha_t = dot(alpha_scaled[i-1], A)*Bo[i]  # (92a)        
        scaling_t = 1./add.reduce(alpha_t)
        scaling_factors[i] = scaling_t
        alpha_scaled[i] = alpha_t*scaling_t      # (92b)    
    return alpha_scaled, scaling_factors
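A toy forward pass (a sketch; zeros, dot and add are assumed star-imported from numpy where _alpha_scaled lives): by construction every row of alpha_scaled is normalized, so reducing along axis 1 gives ones.

from numpy import add, allclose, array

A = array([[0.7, 0.3],
           [0.4, 0.6]])              # 2-state transition matrix
pi = array([0.5, 0.5])
Bo = array([[0.9, 0.2],
            [0.1, 0.8],
            [0.9, 0.2]])             # P(o_t | state) for T = 3 observations
alpha_scaled, factors = _alpha_scaled(A, Bo, pi)
assert allclose(add.reduce(alpha_scaled, 1), 1.0)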
Example #33
def _ksi( A, Bo, alpha, beta ):
    """Compute ksi(t,i,j)=P(q_t=Si,q_(t+1)=Sj|model)"""
    N = A.shape[0]
    T = len(Bo)
    ksy = zeros( (T-1, N, N), float )
    tmp = Bo * beta
    for t in range(T-1):
        # This does transpose[alpha].(B[obs]*beta[t+1])
        # (where . is matrix product)
        ksit = ksy[t, :, :]
        multiply( A, tmp[t+1], ksit )
        multiply( ksit, alpha[t, :, newaxis], ksit )
        ksi_sum = add.reduce( ksit.flat )
        ksit /= ksi_sum
    return ksy
Example #34
    def _final_step(self, gamma, obsIndices):
        """Compute the new model, using gamma"""
        sigma_gamma_B = add.reduce(gamma)
        for i in range(len(sigma_gamma_B)):
            if sigma_gamma_B[i] < EPSILON:
                sigma_gamma_B[i] = 1
        ## Compute new PI
        pi_bar = gamma[0]  # (40a)

        ## Compute new B
        B_bar = zeros((self.M, self.N), float)
        for i in range(len(obsIndices)):
            B_bar[obsIndices[i]] += gamma[i]
        B_bar /= sigma_gamma_B
        return B_bar, pi_bar
Example #35
def _make_f_matrix(matrix):
    """It takes an E matrix and returns an F matrix

    The input is the output of make_E_matrix

    For each element in matrix subtract mean of corresponding row and
    column and add the mean of all elements in the matrix
    """
    num_rows, num_cols = matrix.shape
    # make a vector of the means for each row and column
    # column_means = (add.reduce(E_matrix) / num_rows)
    column_means = (add.reduce(matrix) / num_rows)[:, newaxis]
    trans_matrix = transpose(matrix)
    row_sums = add.reduce(trans_matrix)
    row_means = row_sums / num_cols
    # calculate the mean of the whole matrix
    matrix_mean = nsum(row_sums) / (num_rows * num_cols)
    # adjust each element in the E matrix to make the F matrix

    matrix -= row_means
    matrix -= column_means
    matrix += matrix_mean

    return matrix
Example #36
def angle(x, y, z):
    d1 = dist3(x, y)
    d2 = dist3(z, y)

    if (d1 <= 0 or d2 <= 0):
        return 0.0

    acc = add.reduce((y - x) * (y - z)) / (d1 * d2)

    if (acc > 1):
        acc = 1.

    elif (acc < -1):
        acc = -1.

    return arccos(acc)
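A sketch of the dot-product identity used here (assuming dist3 is the Euclidean distance used elsewhere in this module): perpendicular arms meeting at y give an angle of pi/2.

from numpy import array, pi

x = array([1.0, 0.0, 0.0])
y = array([0.0, 0.0, 0.0])
z = array([0.0, 1.0, 0.0])
assert abs(angle(x, y, z) - pi / 2) < 1e-12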
Example #37
def RombergMethod(y, dx, show=False):

    axis=-1
    y = asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps-1
    n = 1
    k = 0

    while n < Ninterv:
        n <<= 1
        k += 1

    R = {}
    all = (slice(None),) * nd
    slice0 = tupleset(all, axis, 0)
    slicem1 = tupleset(all, axis, -1)
    h = Ninterv*asarray(dx)*1.0
    R[(1,1)] = (y[slice0] + y[slicem1])/2.0*h
    slice_R = all
    start = stop = step = Ninterv
    for i in range(2,k+1):
        start >>= 1
        slice_R = tupleset(slice_R, axis, slice(start,stop,step))
        step >>= 1
        R[(i,1)] = 0.5*(R[(i-1,1)] + h*add.reduce(y[slice_R],axis))
        for j in range(2,i+1):
            R[(i,j)] = R[(i,j-1)] + \
                       (R[(i,j-1)]-R[(i-1,j-1)]) / ((1 << (2*(j-1)))-1)
        h = h / 2.0

    if show:
        precis = 5
        width = 8
        formstr = "%" + str(width) + '.' + str(precis)+'f'

        print('\nRomberg method')
        print('----------------------------------')
        for i in range(1,k+1):
            for j in range(1,i+1):
                print(formstr % R[(i,j)], end=' ')
            print()
        print('----------------------------------')

    return R[(k,k)]
Example #38
 def _weighting_factor_Pall(self, setObs):
     """compute Wk = P(setObservations | lambda_k) """
     P = 1
     for obs in setObs:
         Tk = len(obs)
         obsIndices = self._get_observationIndices(obs)
         Bo = take(self.B, obsIndices, 0)
         null = 0
         for i in range(Tk):
             null = null or (allclose(Bo[i], zeros([self.N])))
         if null:
             P = 0
         else:
             alpha_s, scalingFactor = self.alpha_scaled(self.A, Bo, self.pi)
             alpha = alpha_s[Tk-1] / product(scalingFactor, 0) 
             P *= add.reduce(alpha)
     return P
Example #39
 def calc_angles(self, rvec):
     """
     Calculates whether the distance and angles between the vectors are OK.
      Returns a tuple (dist, nn_angle, n1cc_angle, n2cc_angle), or (None, None, None, None).
     """
     # calculate the distance between the two ring centers
     ccvec = rvec.center - self.center
     dist = sqrt(add.reduce(ccvec * ccvec))  # vector length
     # check whether the distance is small enough to allow stacking
     if 0.0 < dist < 5.5:
         # check whether the angles are in the allowed range
         nn_angle = angle(self.normal, rvec.normal)
         if (nn_angle < 30 or nn_angle > 150):
             n1cc_angle = angle(self.normal, ccvec)
             n2cc_angle = angle(rvec.normal, ccvec)
             return (dist, nn_angle, n1cc_angle, n2cc_angle)
     return (None, None, None, None)
Example #40
def RombergMethod(y, dx, show=False):

    axis = -1
    y = asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps - 1
    n = 1
    k = 0

    while n < Ninterv:
        n <<= 1
        k += 1

    R = {}
    all = (slice(None), ) * nd
    slice0 = tupleset(all, axis, 0)
    slicem1 = tupleset(all, axis, -1)
    h = Ninterv * asarray(dx) * 1.0
    R[(1, 1)] = (y[slice0] + y[slicem1]) / 2.0 * h
    slice_R = all
    start = stop = step = Ninterv
    for i in range(2, k + 1):
        start >>= 1
        slice_R = tupleset(slice_R, axis, slice(start, stop, step))
        step >>= 1
        R[(i, 1)] = 0.5 * (R[(i - 1, 1)] + h * add.reduce(y[slice_R], axis))
        for j in range(2, i + 1):
            R[(i,j)] = R[(i,j-1)] + \
                       (R[(i,j-1)]-R[(i-1,j-1)]) / ((1 << (2*(j-1)))-1)
        h = h / 2.0

    if show:
        precis = 5
        width = 8
        formstr = "%" + str(width) + '.' + str(precis) + 'f'

        print('\nRomberg method')
        print('----------------------------------')
        for i in range(1, k + 1):
            for j in range(1, i + 1):
                print(formstr % R[(i, j)], end=' ')
            print()
        print('----------------------------------')

    return R[(k, k)]
Example #41
def notes_roc(la, lb, eps):
    """ creates a matrix of size len(la)*len(lb), then looks for hits and misses
    in it within eps tolerance windows """
    from numpy import transpose, add, resize
    gdn,fpw,fpg,fpa,fdo,fdp = 0,0,0,0,0,0
    m = len(la)
    n = len(lb)
    x =           resize(la[:][0],(n,m))
    y = transpose(resize(lb[:][0],(m,n)))
    teps =  (abs(x-y) <= eps[0]) 
    x =           resize(la[:][1],(n,m))
    y = transpose(resize(lb[:][1],(m,n)))
    tpitc = (abs(x-y) <= eps[1]) 
    res = teps * tpitc
    res = add.reduce(res,axis=0)
    for i in range(len(res)):
        if res[i] > 1:
            gdn += 1
            fdo += res[i] - 1
        elif res[i] == 1:
            gdn += 1
    fpa = n - gdn - fpa
    return gdn,fpw,fpg,fpa,fdo,fdp
Example #42
def distanceA2AEuclideanSquared(x, std=[], w=[]):
    """
    This function calculates the squared Euclidean distance between
    two or more variables.
    """
    if std:
        x = nparray(x)
        x = stdobs(x)  #  standardize
        x = x.tolist()
    if w:
        x = nparray(x)
        w = w / float(npadd.reduce(w))
        x = x * w  #  weights
        x = x.tolist()

    numrows = len(x)
    distance = [0]*(numrows-1)

    for row in range(numrows - 1):
        npsublist = npsubtract(x[row], x[row + 1])
        sublist = npsublist.tolist()
        distance[row] = [square_double(sublist)]

    return distance
Example #43
    def test_reduce_1d(self):
        import numpy as np
        from numpy import array, add, maximum, less, float16, complex64

        assert less.reduce([5, 4, 3, 2, 1])
        assert add.reduce([1, 2, 3]) == 6
        assert maximum.reduce([1]) == 1
        assert maximum.reduce([1, 2, 3]) == 3
        raises(ValueError, maximum.reduce, [])

        assert add.reduce(array([True, False] * 200)) == 200
        assert add.reduce(array([True, False] * 200, dtype="int8")) == 200
        assert add.reduce(array([True, False] * 200), dtype="int8") == -56
        assert type(add.reduce(array([True, False] * 200, dtype="float16"))) is float16
        assert type(add.reduce(array([True, False] * 200, dtype="complex64"))) is complex64

        for dtype in ["bool", "int"]:
            assert np.equal.reduce([1, 2], dtype=dtype) == True
            assert np.equal.reduce([1, 2, 0], dtype=dtype) == False
Example #44
def model(SWnet, LWnet, Hs, Hl):
    N = SWnet.shape[0]
    Rnet_array = EnergyNetFluxBalance(SWnet, LWnet, Hs, Hl)
    Rnet = add.reduce(Rnet_array, axis=0) / N
    return Rnet
Example #45
    def Intersectar(self,tipo='real'):
        '''!
        @brief: Method that computes the intersection between the two lines.
        @param tipo str: option that sets whether the computed intersections are real or virtual.
        @note tipo: Valid values: real, virtual
        '''
        self.__checkTipo(tipo)
        # Coordinates of each point.
        xi1=self.__l1.getPuntoInicial().getX()
        yi1=self.__l1.getPuntoInicial().getY()
        
        xf1=self.__l1.getPuntoFinal().getX()
        yf1=self.__l1.getPuntoFinal().getY()
        
        xi2=self.__l2.getPuntoInicial().getX()
        yi2=self.__l2.getPuntoInicial().getY()
        
        xf2=self.__l2.getPuntoFinal().getX()
        yf2=self.__l2.getPuntoFinal().getY()
        
        v1=[xf1-xi1,yf1-yi1]
        v2=[xf2-xi2,yf2-yi2]
        A=matrix([[v1[1],-v1[0]],
                  [v2[1],-v2[0]]])
        B=array([xf1*v1[1]-yf1*v1[0],xf2*v2[1]-yf2*v2[0]])
        #print(A,B)
        if det(A)==0:
            A=add.reduce(A, 0)
            B=add.reduce(B, 0)
            #print(A,B)
            if B==0:
                # solution of the form ay = bx
                return None
            if B!=0:
                if A.item((0, 0))==0:
                    x=xi1
                    y=B/A.item((0, 1))
                elif A.item((0, 1))==0:
                    x=B/A.item((0, 0))
                    y=yi1
        else:
            sol=solve(A,B)
            x=sol[0]
            y=sol[1]
            #print(x,y)
        # Computation of the intersection  # y=ax+b
#         a1=None
#         a2=None
#         b1=None
#         b2=None
#             
#         if abs(yf1-yi1)==0.0:
#             a1=0.0
#         elif abs(xf1-xi1)==0.0:
#             a1=None
#         else:
#             a1=(yf1-yi1)/(xf1-xi1)
#             
#         if abs(yf2-yi2)==0.0:
#             a2=0.0
#         elif abs(xf2-xi2)==0.0:
#             a2=None
#         else:
#             a2=(yf2-yi2)/(xf2-xi2)
#             
#         if a1==None:
#             b1=xi1
#         else:
#             b1=yi1-a1*xi1
#             
#         if a2==None:
#             b2=xi2
#         else:
#             b2=yi2-a2*xi2
#         
#             
# #         print(a1,a2,b1,b2)
#         if a1==None and a2==None:
#             # Parallel lines.
#             return None
#         elif a1==None and a2==0.0:
#             x=xi2
#             y=a2
#             
#         elif a1==0.0 and a2==None:
#             x=b2
#             y=b1
#         elif a1==None:
#             x=b1
#             y=(b2-b1)/(-a2)
#         elif a2==None:
#             x=b2
#             y=(b2-b1)/(a1)
#         elif a1==None and a2==None:
#             x=(b2-b1)
#         else:
#             try:
#                 x=(b2-b1)/(a1-a2)
#                 y=a1*x+b1
#             except:
#                 x=0.0
#                 y=a1*x+b1
                
                
        if tipo=='real':
            # intervals of definition.
#             ixi=sorted([xi1,xf1])
#             iyi=sorted([yi1,yf1])
#             ixf=sorted([xi2,xf2])
#             iyf=sorted([yi2,yf2])
#             print(ixi,iyi,ixf,iyf)
#             print(str(round(y,20)))
#             print(round(min(iyi),20))
#             print(y==min(iyi))
#             print(self.__l1.PointIn(pt2.Punto2D(x,y),tolerance=0.001))
#             print(self.__l2.PointIn(pt2.Punto2D(x,y),tolerance=0.001))
#             print(x>=min(ixi),x<=max(ixi),y>=min(iyi),y<=max(iyi),x>=min(ixf),x<=max(ixf),y>=min(iyf),y<=max(iyf))
#             print(greater_equal([x],[min(ixi),min(ixf)]))
#             print(greater_equal([y],[min(iyi),min(iyf)]))
#             print(y,min(iyf))
#             if x>=min(ixi) and x<=max(ixi) and y>=min(iyi) and y<=max(iyi) and x>=min(ixf) and x<=max(ixf) and y>=min(iyf) and y<=max(iyf):
#             if greater_equal([x,min(ixi)],[y,min(iyi)],[x,min(ixf)],[y,min(iyf)]):
            if self.__l1.PointIn(pt2.Punto2D(x,y),tolerance=0.001) and self.__l2.PointIn(pt2.Punto2D(x,y),tolerance=0.001):
                return pt2.Punto2D(x,y)
            else: 
                return None
        elif tipo=='virtual':
            return pt2.Punto2D(x,y)
Example #46
def Sum_j(f):
    return add.reduce(f,1)
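Illustration (assuming add is imported from numpy where Sum_j lives): reducing over axis 1 yields one sum per row.

from numpy import arange

f = arange(6).reshape(2, 3)          # [[0, 1, 2], [3, 4, 5]]
assert (Sum_j(f) == [3, 12]).all()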
Example #47
def norm(x, ord=None):
    """ norm(x, ord=None) -> n

    Matrix or vector norm.

    Inputs:

      x -- a rank-1 (vector) or rank-2 (matrix) array
      ord -- the order of the norm.

     Comments:
       For arrays of any rank, if ord is None:
         calculate the square norm (Euclidean norm for vectors, Frobenius norm for matrices)

       For vectors ord can be any real number including Inf or -Inf.
         ord = Inf, computes the maximum of the magnitudes
         ord = -Inf, computes minimum of the magnitudes
         ord is finite, computes sum(abs(x)**ord,axis=0)**(1.0/ord)

       For matrices ord can only be one of the following values:
         ord = 2 computes the largest singular value
         ord = -2 computes the smallest singular value
         ord = 1 computes the largest column sum of absolute values
         ord = -1 computes the smallest column sum of absolute values
         ord = Inf computes the largest row sum of absolute values
         ord = -Inf computes the smallest row sum of absolute values
         ord = 'fro' computes the frobenius norm sqrt(sum(diag(X.H * X),axis=0))

       For values ord < 0, the result is, strictly speaking, not a
       mathematical 'norm', but it may still be useful for numerical purposes.
    """
    x = asarray_chkfinite(x)
    if ord is None: # check the default case first and handle it immediately
        return sqrt(add.reduce(real((conjugate(x)*x).ravel())))

    nd = len(x.shape)
    Inf = numpy.Inf
    if nd == 1:
        if ord == Inf:
            return numpy.amax(abs(x))
        elif ord == -Inf:
            return numpy.amin(abs(x))
        elif ord == 1:
            return numpy.sum(abs(x),axis=0) # special case for speedup
        elif ord == 2:
            return sqrt(numpy.sum(real((conjugate(x)*x)),axis=0)) # special case for speedup
        else:
            return numpy.sum(abs(x)**ord,axis=0)**(1.0/ord)
    elif nd == 2:
        if ord == 2:
            return numpy.amax(decomp.svd(x,compute_uv=0))
        elif ord == -2:
            return numpy.amin(decomp.svd(x,compute_uv=0))
        elif ord == 1:
            return numpy.amax(numpy.sum(abs(x),axis=0))
        elif ord == Inf:
            return numpy.amax(numpy.sum(abs(x),axis=1))
        elif ord == -1:
            return numpy.amin(numpy.sum(abs(x),axis=0))
        elif ord == -Inf:
            return numpy.amin(numpy.sum(abs(x),axis=1))
        elif ord in ['fro','f']:
            return sqrt(add.reduce(real((conjugate(x)*x).ravel())))
        else:
            raise ValueError, "Invalid norm order for matrices."
    else:
        raise ValueError, "Improper number of dimensions to norm."
Example #48
def norm(x, ord=None):
    """Matrix or vector norm.

    Parameters
    ----------
    x : array, shape (M,) or (M, N)
    ord : number, or {None, 1, -1, 2, -2, inf, -inf, 'fro'}
        Order of the norm:

        =====  ============================  ==========================
        ord    norm for matrices             norm for vectors
        =====  ============================  ==========================
        None   Frobenius norm                2-norm
        'fro'  Frobenius norm                --
        inf    max(sum(abs(x), axis=1))      max(abs(x))
        -inf   min(sum(abs(x), axis=1))      min(abs(x))
        1      max(sum(abs(x), axis=0))      as below
        -1     min(sum(abs(x), axis=0))      as below
        2      2-norm (largest sing. value)  as below
        -2     smallest singular value       as below
        other  --                            sum(abs(x)**ord)**(1./ord)
        =====  ============================  ==========================

    Returns
    -------
    n : float
        Norm of the matrix or vector

    Notes
    -----
    For values ord < 0, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for numerical
    purposes.

    """
    x = asarray_chkfinite(x)
    if ord is None: # check the default case first and handle it immediately
        return sqrt(add.reduce(real((conjugate(x)*x).ravel())))

    nd = len(x.shape)
    Inf = numpy.Inf
    if nd == 1:
        if ord == Inf:
            return numpy.amax(abs(x))
        elif ord == -Inf:
            return numpy.amin(abs(x))
        elif ord == 1:
            return numpy.sum(abs(x),axis=0) # special case for speedup
        elif ord == 2:
            return sqrt(numpy.sum(real((conjugate(x)*x)),axis=0)) # special case for speedup
        else:
            return numpy.sum(abs(x)**ord,axis=0)**(1.0/ord)
    elif nd == 2:
        if ord == 2:
            return numpy.amax(decomp.svd(x,compute_uv=0))
        elif ord == -2:
            return numpy.amin(decomp.svd(x,compute_uv=0))
        elif ord == 1:
            return numpy.amax(numpy.sum(abs(x),axis=0))
        elif ord == Inf:
            return numpy.amax(numpy.sum(abs(x),axis=1))
        elif ord == -1:
            return numpy.amin(numpy.sum(abs(x),axis=0))
        elif ord == -Inf:
            return numpy.amin(numpy.sum(abs(x),axis=1))
        elif ord in ['fro','f']:
            return sqrt(add.reduce(real((conjugate(x)*x).ravel())))
        else:
            raise ValueError, "Invalid norm order for matrices."
    else:
        raise ValueError, "Improper number of dimensions to norm."
Example #49
def slash(k):
    from numpy import add

    return matrix(add.reduce([gamma(i) * k[i] for i in range(4)]))
Example #50
def romb(y, dx=1.0, axis=-1, show=False):
    """Romberg integration using samples of a function

    Inputs:

       y    -  a vector of 2**k + 1 equally-spaced samples of a function
       dx   -  the sample spacing.
       axis -  the axis along which to integrate
       show -  When y is a single 1-d array, then if this argument is True
               print the table showing Richardson extrapolation from the
               samples.

    Output: ret

       ret  - The integrated result for each axis.

    See also:

      quad - adaptive quadrature using QUADPACK
      romberg - adaptive Romberg quadrature
      quadrature - adaptive Gaussian quadrature
      fixed_quad - fixed-order Gaussian quadrature
      dblquad, tplquad - double and triple integrals
      simps, trapz - integrators for sampled data
      cumtrapz - cumulative integration for sampled data
      ode, odeint - ODE integrators
    """
    y = asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps - 1
    n = 1
    k = 0
    while n < Ninterv:
        n <<= 1
        k += 1
    if n != Ninterv:
        raise ValueError, "Number of samples must be one plus a non-negative power of 2."

    R = {}
    all = (slice(None),) * nd
    slice0 = tupleset(all, axis, 0)
    slicem1 = tupleset(all, axis, -1)
    h = Ninterv * asarray(dx) * 1.0
    R[(1, 1)] = (y[slice0] + y[slicem1]) / 2.0 * h
    slice_R = all
    start = stop = step = Ninterv
    for i in range(2, k + 1):
        start >>= 1
        slice_R = tupleset(slice_R, axis, slice(start, stop, step))
        step >>= 1
        R[(i, 1)] = 0.5 * (R[(i - 1, 1)] + h * add.reduce(y[slice_R], axis))
        for j in range(2, i + 1):
            R[(i, j)] = R[(i, j - 1)] + (R[(i, j - 1)] - R[(i - 1, j - 1)]) / ((1 << (2 * (j - 1))) - 1)
        h = h / 2.0

    if show:
        if not isscalar(R[(1, 1)]):
            print "*** Printing table only supported for integrals" + " of a single data set."
        else:
            try:
                precis = show[0]
            except (TypeError, IndexError):
                precis = 5
            try:
                width = show[1]
            except (TypeError, IndexError):
                width = 8
            formstr = "%" + str(width) + "." + str(precis) + "f"

            print "\n       Richardson Extrapolation Table for Romberg Integration       "
            print "===================================================================="
            for i in range(1, k + 1):
                for j in range(1, i + 1):
                    print formstr % R[(i, j)],
                print
            print "====================================================================\n"

    return R[(k, k)]
Example #51
def computepra(T):
    """Each element of T, t_ab, should be the times that the observated state trans to b from a.  """
    
    if showpra:
        print """--------------------------------------------------------------------------------"""
        print """Originally input matrix."""
        print T
        print """--------------------------------------------------------------------------------"""

    R = T + 1

    if showpra:
        print """Smoothed input matrix."""
        print R
        print """--------------------------------------------------------------------------------"""
        print """Transport of smoothed input matrix."""

    RConj = R.conj().T

    if showpra:
        print(RConj)
        print("""--------------------------------------------------------------------------------""")

    
    #########################################################################
    # Assuming the input matrix T is a 3x3 matrix, then
    #
    #     | a b c |
    # R = | d e f |, which is a 3x3 matrix,
    #     | g h i | 
    # 
    # and RowSum = |(a+b+c) (d+e+f) (g+h+i)| which is a vector with 3 elements.
    #
    ###########################################################################

    RowSum = add.reduce(R, 1)

    if showpra:
        print """Sum of rows of smoothed input matrix."""
        print RowSum
        print """--------------------------------------------------------------------------------"""
     
    #############################################################################
    # What exactly does A_{nxn} / B_{1xn} do in numpy? It means:
    # [first column of A] / B_1 and
    # [second column of A] / B_2 and so on.
    # So I transposed R (obtaining RConj) and divided it by the row sums of R,
    # creating a probability matrix.
    # Note that logilab.hmm.HMM requires the probability matrix to be a
    # row-probability matrix, so I transposed it back.
    #
    # If uncertain, please refer to the numpy manual; links to the online
    # version follow:
    #
    #   http://www.scipy.org/Numpy_Example_List
    #   http://www.scipy.org/NumPy_for_Matlab_Users
    #
    ###############################################################################
    Temp = RConj / RowSum
    statetransition = Temp.conj().T

    if showpra: 
        print """The final state transition matrix."""
        print statetransition
        print """--------------------------------------------------------------------------------"""
        print """Sum up the rows ..."""
        print add.reduce(statetransition,1)
        print """And minus each elements by 1."""
        print add.reduce(statetransition,1) - 1
        print """--------------------------------------------------------------------------------"""
Example #52
 def load(self):
     """
     Attempts to load candidate data from the file, performs file consistency checks if the
     debug flag is set to true. Much of this code has been extracted from PRESTO by Scott Ransom.
     
     Please see:
     
     http://www.cv.nrao.edu/~sransom/presto/
     https://github.com/scottransom/presto
     
     Parameters:
     N/A
     
     Return:
     N/A
     """
     infile = open(self.cand, "rb")
     
     # The code below appears to have been taken from Presto, so it may be
     # helpful to look at the Presto github repository (see above) to get a better feel
     # for what this code is doing. I certainly have no idea what is going on, although
     # data is being unpacked in a specific order.
         
     swapchar = '<' # this is little-endian
     data = infile.read(5*4)
     testswap = struct.unpack(swapchar+"i"*5, data)
     # This is a hack to try and test the endianness of the data.
     # None of the 5 values should be a large positive number.
     
     if (fabs(asarray(testswap))).max() > 100000:
         swapchar = '>' # this is big-endian
         
     (self.numdms, self.numperiods, self.numpdots, self.nsub, self.npart) = struct.unpack(swapchar+"i"*5, data)
     (self.proflen, self.numchan, self.pstep, self.pdstep, self.dmstep, self.ndmfact, self.npfact) = struct.unpack(swapchar+"i"*7, infile.read(7*4))
     self.filenm = infile.read(struct.unpack(swapchar+"i", infile.read(4))[0])
     self.candnm = infile.read(struct.unpack(swapchar+"i", infile.read(4))[0])
     self.telescope = infile.read(struct.unpack(swapchar+"i", infile.read(4))[0])
     self.pgdev = infile.read(struct.unpack(swapchar+"i", infile.read(4))[0])
     
     test = infile.read(16)
     has_posn = 1
     for ii in range(16):
         if test[ii] not in '0123456789:.-\0':
             has_posn = 0
             break
         
     if has_posn:
         self.rastr = test[:test.find('\0')]
         test = infile.read(16)
         self.decstr = test[:test.find('\0')]
         (self.dt, self.startT) = struct.unpack(swapchar+"dd", infile.read(2*8))
     else:
         self.rastr = "Unknown"
         self.decstr = "Unknown"
         (self.dt, self.startT) = struct.unpack(swapchar+"dd", test)
         
     (self.endT, self.tepoch, self.bepoch, self.avgvoverc, self.lofreq,self.chan_wid, self.bestdm) = struct.unpack(swapchar+"d"*7, infile.read(7*8))
     (self.topo_pow, tmp) = struct.unpack(swapchar+"f"*2, infile.read(2*4))
     (self.topo_p1, self.topo_p2, self.topo_p3) = struct.unpack(swapchar+"d"*3,infile.read(3*8))
     (self.bary_pow, tmp) = struct.unpack(swapchar+"f"*2, infile.read(2*4))
     (self.bary_p1, self.bary_p2, self.bary_p3) = struct.unpack(swapchar+"d"*3,infile.read(3*8))
     (self.fold_pow, tmp) = struct.unpack(swapchar+"f"*2, infile.read(2*4))
     (self.fold_p1, self.fold_p2, self.fold_p3) = struct.unpack(swapchar+"d"*3,infile.read(3*8))
     (self.orb_p, self.orb_e, self.orb_x, self.orb_w, self.orb_t, self.orb_pd,self.orb_wd) = struct.unpack(swapchar+"d"*7, infile.read(7*8))
     self.dms = asarray(struct.unpack(swapchar+"d"*self.numdms,infile.read(self.numdms*8)))
     
     if self.numdms==1:
         self.dms = self.dms[0]
         
     self.periods = asarray(struct.unpack(swapchar + "d" * self.numperiods,infile.read(self.numperiods*8)))
     self.pdots = asarray(struct.unpack(swapchar + "d" * self.numpdots,infile.read(self.numpdots*8)))
     self.numprofs = self.nsub * self.npart
     
     if (swapchar=='<'):  # little endian
         self.profs = zeros((self.npart, self.nsub, self.proflen), dtype='d')
         for ii in range(self.npart):
             for jj in range(self.nsub):
                 try:
                     self.profs[ii,jj,:] = fromfile(infile, float64, self.proflen)
                 except Exception: # Catch *all* exceptions.
                     pass
                     #print ""
     else:
         self.profs = asarray(struct.unpack(swapchar+"d"*self.numprofs*self.proflen,infile.read(self.numprofs*self.proflen*8)))
         self.profs = reshape(self.profs, (self.npart, self.nsub, self.proflen))
             
     self.binspersec = self.fold_p1 * self.proflen
     self.chanpersub = self.numchan / self.nsub
     self.subdeltafreq = self.chan_wid * self.chanpersub
     self.hifreq = self.lofreq + (self.numchan-1) * self.chan_wid
     self.losubfreq = self.lofreq + self.subdeltafreq - self.chan_wid
     self.subfreqs = arange(self.nsub, dtype='d')*self.subdeltafreq + self.losubfreq
     self.subdelays_bins = zeros(self.nsub, dtype='d')
     self.killed_subbands = []
     self.killed_intervals = []
     self.pts_per_fold = []
     
     # Note: a foldstats struct is read in as a group of 7 doubles
      # they correspond to, in order:
     # numdata, data_avg, data_var, numprof, prof_avg, prof_var, redchi
     self.stats = zeros((self.npart, self.nsub, 7), dtype='d')
     
     for ii in range(self.npart):
         currentstats = self.stats[ii]
         
         for jj in range(self.nsub):
             if (swapchar=='<'):  # little endian
                 try:
                     currentstats[jj] = fromfile(infile, float64, 7)
                 except Exception: # Catch *all* exceptions.
                     pass
                     #print ""
             else:
                 try:
                     currentstats[jj] = asarray(struct.unpack(swapchar+"d"*7,infile.read(7*8)))
                 except Exception: # Catch *all* exceptions.
                     pass
                     #print ""
                 
         self.pts_per_fold.append(self.stats[ii][0][0])  # numdata from foldstats
         
     self.start_secs = add.accumulate([0]+self.pts_per_fold[:-1])*self.dt
     self.pts_per_fold = asarray(self.pts_per_fold)
     self.mid_secs = self.start_secs + 0.5*self.dt*self.pts_per_fold
     
     if (not self.tepoch==0.0):
         self.start_topo_MJDs = self.start_secs/86400.0 + self.tepoch
         self.mid_topo_MJDs = self.mid_secs/86400.0 + self.tepoch
     
     if (not self.bepoch==0.0):
         self.start_bary_MJDs = self.start_secs/86400.0 + self.bepoch
         self.mid_bary_MJDs = self.mid_secs/86400.0 + self.bepoch
         
     self.Nfolded = add.reduce(self.pts_per_fold)
     self.T = self.Nfolded*self.dt
     self.avgprof = (self.profs/self.proflen).sum()
     self.varprof = self.calc_varprof()
     self.barysubfreqs = self.subfreqs
     infile.close()
         
     # If explicit debugging required.
     if(self.debug):
         
         # If candidate file is invalid in some way...
         if(self.isValid()==False):
             
             print "Invalid PFD candidate: ",self.cand
             raise Exception("Invalid PFD candidate: PFDFile.py (Line 214).")
         
         # Candidate file is valid.
         else:
             print "Candidate file valid."
             self.profile = array(self.getprofile())
         
     # Just go directly to feature generation without checks.
     else:
         self.out( "Candidate validity checks skipped.","")
         self.profile = array(self.getprofile())
Example #53
def romb(y, dx=1.0, axis=-1, show=False):
    """
    Romberg integration using samples of a function.

    Parameters
    -----------
    y : array_like
        A vector of ``2**k + 1`` equally-spaced samples of a function.
    dx : array_like, optional
        The sample spacing. Default is 1.
    axis : int, optional
        The axis along which to integrate. Default is -1 (last axis).
    show : bool, optional
           When y is a single 1-D array, then if this argument is True
           print the table showing Richardson extrapolation from the
           samples. Default is False.

    Returns
    -------
    ret : array_like
        The integrated result for each axis.

    See also
    --------
    quad - adaptive quadrature using QUADPACK
    romberg - adaptive Romberg quadrature
    quadrature - adaptive Gaussian quadrature
    fixed_quad - fixed-order Gaussian quadrature
    dblquad, tplquad - double and triple integrals
    simps, trapz - integrators for sampled data
    cumtrapz - cumulative integration for sampled data
    ode, odeint - ODE integrators

    """
    y = asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps-1
    n = 1
    k = 0
    while n < Ninterv:
        n <<= 1
        k += 1
    if n != Ninterv:
        raise ValueError("Number of samples must be one plus a "
                "non-negative power of 2.")

    R = {}
    all = (slice(None),) * nd
    slice0 = tupleset(all, axis, 0)
    slicem1 = tupleset(all, axis, -1)
    h = Ninterv*asarray(dx)*1.0
    R[(1,1)] = (y[slice0] + y[slicem1])/2.0*h
    slice_R = all
    start = stop = step = Ninterv
    for i in range(2,k+1):
        start >>= 1
        slice_R = tupleset(slice_R, axis, slice(start,stop,step))
        step >>= 1
        R[(i,1)] = 0.5*(R[(i-1,1)] + h*add.reduce(y[slice_R],axis))
        for j in range(2,i+1):
            R[(i,j)] = R[(i,j-1)] + \
                       (R[(i,j-1)]-R[(i-1,j-1)]) / ((1 << (2*(j-1)))-1)
        h = h / 2.0

    if show:
        if not isscalar(R[(1,1)]):
            print("*** Printing table only supported for integrals" + \
                  " of a single data set.")
        else:
            try:
                precis = show[0]
            except (TypeError, IndexError):
                precis = 5
            try:
                width = show[1]
            except (TypeError, IndexError):
                width = 8
            formstr = "%" + str(width) + '.' + str(precis)+'f'

            print("\n       Richardson Extrapolation Table for Romberg Integration       ")
            print("====================================================================")
            for i in range(1,k+1):
                for j in range(1,i+1):
                    print(formstr % R[(i,j)], end=' ')
                print()
            print("====================================================================\n")

    return R[(k,k)]
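Usage sketch: integrate f(x) = x**2 over [0, 1] from 2**4 + 1 equally spaced samples; Richardson extrapolation recovers the exact value 1/3.

from numpy import arange

y = (arange(17) / 16.0) ** 2         # 17 = 2**4 + 1 samples, dx = 1/16
result = romb(y, dx=1.0 / 16)
assert abs(result - 1.0 / 3.0) < 1e-12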
Example #54
def Sum_i(f):
    return add.reduce(f,0)
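And the axis-0 counterpart (same assumption about the numpy import): reducing over axis 0 yields one sum per column.

from numpy import arange

f = arange(6).reshape(2, 3)          # [[0, 1, 2], [3, 4, 5]]
assert (Sum_i(f) == [3, 5, 7]).all()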