Example #1
    def do(self, a, b):
        d = linalg.det(a)
        (s, ld) = linalg.slogdet(a)
        if asarray(a).dtype.type in (single, double):
            ad = asarray(a).astype(double)
        else:
            ad = asarray(a).astype(cdouble)
        ev = linalg.eigvals(ad)
        assert_almost_equal(d, multiply.reduce(ev))
        assert_almost_equal(s * np.exp(ld), multiply.reduce(ev))
        if s != 0:
            assert_almost_equal(np.abs(s), 1)
        else:
            assert_equal(ld, -inf)
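The `do` variants above and below all check the same identity: the determinant equals the product of the eigenvalues, and `slogdet` returns a `(sign, log|det|)` pair that reconstructs it (with `ld == -inf` signalling a singular matrix). A minimal standalone sketch of that check:

import numpy as np

a = np.array([[2.0, 1.0], [1.0, 3.0]])
sign, logdet = np.linalg.slogdet(a)
ev = np.linalg.eigvals(a)

# det(a) equals the product of the eigenvalues, and slogdet's
# (sign, log|det|) pair reconstructs the same value.
assert np.isclose(np.linalg.det(a), np.multiply.reduce(ev).real)
assert np.isclose(sign * np.exp(logdet), np.linalg.det(a))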
Example #2
    def do(self, a, b):
        d = linalg.det(a)
        (s, ld) = linalg.slogdet(a)
        if asarray(a).dtype.type in (single, double):
            ad = asarray(a).astype(double)
        else:
            ad = asarray(a).astype(cdouble)
        ev = linalg.eigvals(ad)
        assert_almost_equal(d, multiply.reduce(ev))
        assert_almost_equal(s * np.exp(ld), multiply.reduce(ev))
        if s != 0:
            assert_almost_equal(np.abs(s), 1)
        else:
            assert_equal(ld, -inf)
Example #3
def DUMvNormal(*args, **kwargs):
    v = pm.MvNormal(*args, **kwargs)
    global BOUND
    # Add a -inf log-potential whenever any component of v falls outside [0, 1]
    pm.Potential("bound {}".format(BOUND),
                 tt.switch(multiply.reduce(multiply(0 <= v, v <= 1)), 0, -inf))
    BOUND += 1
    return v
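The `tt.switch(multiply.reduce(...), 0, -inf)` term is an indicator: the product of the two elementwise comparisons is truthy only when every component of `v` lies in [0, 1]. A plain-NumPy sketch of that indicator logic (outside of PyMC/Theano):

import numpy as np

def in_unit_box(v):
    # Product of elementwise indicators: truthy only when every
    # component of v lies in [0, 1].
    return np.multiply.reduce(np.multiply(0 <= v, v <= 1))

print(in_unit_box(np.array([0.2, 0.9])))   # True
print(in_unit_box(np.array([0.2, 1.5])))   # False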
Example #4
    def do(self, a, b):
        d = linalg.det(a)
        (s, ld) = linalg.slogdet(a)
        if asarray(a).dtype.type in (single, double):
            ad = asarray(a).astype(double)
        else:
            ad = asarray(a).astype(cdouble)
        ev = linalg.eigvals(ad)
        assert_almost_equal(d, multiply.reduce(ev, axis=-1))
        assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))

        s = np.atleast_1d(s)
        ld = np.atleast_1d(ld)
        m = (s != 0)
        assert_almost_equal(np.abs(s[m]), 1)
        assert_equal(ld[~m], -inf)
Example #5
    def do(self, a, b):
        d = linalg.det(a)
        (s, ld) = linalg.slogdet(a)
        if asarray(a).dtype.type in (single, double):
            ad = asarray(a).astype(double)
        else:
            ad = asarray(a).astype(cdouble)
        ev = linalg.eigvals(ad)
        assert_almost_equal(d, multiply.reduce(ev, axis=-1))
        assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))

        s = np.atleast_1d(s)
        ld = np.atleast_1d(ld)
        m = (s != 0)
        assert_almost_equal(np.abs(s[m]), 1)
        assert_equal(ld[~m], -inf)
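Examples #4 and #5 extend the check to stacked matrices: `eigvals` returns one row of eigenvalues per matrix, so the product is taken along the last axis, and the sign/log pair is validated elementwise through a mask. A batched sketch:

import numpy as np

a = np.stack([np.eye(2), np.array([[0.0, 1.0], [1.0, 0.0]])])  # two 2x2 matrices
s, ld = np.linalg.slogdet(a)
ev = np.linalg.eigvals(a)

# One determinant and one eigenvalue product per stacked matrix.
assert np.allclose(np.linalg.det(a), np.multiply.reduce(ev, axis=-1).real)
m = (s != 0)
assert np.allclose(np.abs(s[m]), 1)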
Example #6
    def fit(self, X):
        from numpy import sort, unique, allclose, multiply
        from numpy.random import uniform
        from pandas import DataFrame, concat
        from collections import deque
        Centroids_history = deque(maxlen=2)
        self.X = X
        self.centroids_ = None

        """THE TRIALS LOOP - TO SELECT THE BEST RESULT"""
        trials = []
        for trial_number in range(5):   # max number of trials

            """GENERATE STARTING RANDOM POINTS"""   # n_clusters points; n_dim columns
            space = sort(X, axis=0)[[0,-1],:]
            while True:
                C = current_centroids = uniform(low=space[0], high=space[1], size=(self.n_clusters, X.shape[-1]))
                y_current = ((X[None, :, :] - C[:, None, :]) ** 2).sum(axis=-1).T.argmin(axis=1)
                if len(unique(y_current)) == self.n_clusters:
                    break

            """THE INNER LOOP"""
            df = DataFrame(X, dtype=X.dtype)
            for loop_number in range(100):

                """RECALCULATE CENTROIDS"""
                dfGrouped = df.groupby(by=y_current).mean()

                """IN CASE A CENTROID IS LOST"""
                if len(unique(y_current))!=self.n_clusters:
                    lost_centroids = sorted(set(range(self.n_clusters)).difference(unique(y_current)))
                    appendage = DataFrame(C[lost_centroids], index=lost_centroids, dtype=C.dtype)
                    dfGrouped = dfGrouped.append(appendage).sort_index()
                C = dfGrouped.values

                """CHECK IF IT IS TIME TO BREAK OUT OF THE LOOP"""
                Centroids_history.append(C)
                if len(Centroids_history) >= 2 and allclose(Centroids_history[-2], Centroids_history[-1]):
                    break
                else:
                    y_current = ((X[None, :, :] - C[:, None, :]) ** 2).sum(axis=-1).T.argmin(axis=1)
                #continue the loop
            else:
                from warnings import warn
                warn("max loops reached: {}".format(loop_number))

            """after (successful) breaking out of the loop: CALCULATE VARIANCE"""
            cluster_variances = df.groupby(by=y_current).var(ddof=0).sum(axis=0) ** 0.5
            hypervolume_as_metric_for_total_variance = multiply.reduce(cluster_variances.values)
            trials.append((C, y_current, hypervolume_as_metric_for_total_variance))
        else: "END OF THE TRIALS LOOP"

        """SELECTING THE BEST TRIAL"""
        from operator import itemgetter
        trials = sorted(trials, key=itemgetter(-1))
        self.centroids_ = trials[0][0]
        self._y_train_pred = trials[0][1]
        return self
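The trial-selection metric at the end multiplies the per-dimension cluster standard deviations, i.e. the volume of an axis-aligned "spread box": the trial with the smallest hypervolume wins. A standalone sketch of that metric, with made-up data `X` and integer labels `y`:

import numpy as np
from pandas import DataFrame

def spread_hypervolume(X, y):
    # Per-dimension within-cluster variance, summed over clusters,
    # square-rooted, then multiplied across dimensions.
    per_dim = DataFrame(X).groupby(by=y).var(ddof=0).sum(axis=0) ** 0.5
    return np.multiply.reduce(per_dim.values)

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 2))
y = (X[:, 0] > 0).astype(int)
print(spread_hypervolume(X, y))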
Example #7
    def do(self, a, b):
        d = linalg.det(a)
        if asarray(a).dtype.type in (single, double):
            ad = asarray(a).astype(double)
        else:
            ad = asarray(a).astype(cdouble)
        ev = linalg.eigvals(ad)
        assert_almost_equal(d, multiply.reduce(ev))
Example #8
    def do(self, a, b):
        d = linalg.det(a)
        if asarray(a).dtype.type in (single, double):
            ad = asarray(a).astype(double)
        else:
            ad = asarray(a).astype(cdouble)
        ev = linalg.eigvals(ad)
        assert_almost_equal(d, multiply.reduce(ev))
Example #9
    def test_multiply(self):
        from numpy import array, multiply, arange

        a = array([-5.0, -0.0, 1.0])
        b = array([3.0, -2.0, -3.0])
        c = multiply(a, b)
        for i in range(3):
            assert c[i] == a[i] * b[i]

        a = arange(15).reshape(5, 3)
        assert (multiply.reduce(a) == array([0, 3640, 12320])).all()
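With no `axis` argument, `multiply.reduce` on a 2-D array reduces along axis 0, so the test above compares against columnwise products. A quick illustration of both axes:

from numpy import arange, multiply

a = arange(15).reshape(5, 3)
print(multiply.reduce(a))           # [    0  3640 12320], columnwise products
print(multiply.reduce(a, axis=1))   # rowwise products instead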
Example #10
    def test_multiply(self):
        from numpy import array, multiply, arange

        a = array([-5.0, -0.0, 1.0])
        b = array([3.0, -2.0, -3.0])
        c = multiply(a, b)
        for i in range(3):
            assert c[i] == a[i] * b[i]

        a = arange(15).reshape(5, 3)
        assert (multiply.reduce(a) == array([0, 3640, 12320])).all()
Example #11
    def testpattern(self):
        mm = MultiMethod()

        @defmethod(mm, "lambda x : x>0")
        def meth(x):
            return x * mm(x - 1)

        @defmethod(mm, "anytype")
        def meth(x):
            return 1

        self.assertEqual(mm(0), 1)
        self.assertEqual(mm(1), 1)
        self.assertEqual(mm(2), 2)
        self.assertEqual(mm(4), 4 * 3 * 2)
        from numpy import multiply
        self.assertEqual(mm(10), multiply.reduce(range(1, 11)))
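`multiply.reduce(range(1, 11))` is just 10! computed as a ufunc reduction, which is what the recursive multimethod must agree with:

from math import factorial
from numpy import multiply

assert multiply.reduce(range(1, 11)) == factorial(10) == 3628800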
Example #12
    def mestimate(option):
        """
        Use the m-estimate to estimate P(a_i|v_j): the probability of an
        attribute a_i given the classification v_j, out of the set of
        possible classifications V.
        M-estimate from http://www.inf.u-szeged.hu/~ormandi/ai2/06-naiveBayes-example.pdf
        """
        # all of the records whose classifier equals option
        total = self.data[self.data[self.result_column] == option]
        total_num = len(total)
        # arbitrary equivalent sample size
        m = 3
        # calculate the m-estimate; *1.0 ensures a float is always returned
        mest = lambda nc: 1.0 * (nc + m * p) / (total_num + m)
        attr_estimates = fromiter((mest(len(total[total[k] == v])) for k, v in attrs.items()),
                                  float, count=len(attrs))
        return p * multiply.reduce(attr_estimates)
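The m-estimate smooths the raw frequency n_c/n toward a prior p using an equivalent sample size m, and naive Bayes then multiplies the per-attribute estimates. A self-contained sketch with made-up counts:

from numpy import fromiter, multiply

def m_estimate(nc, n, p, m=3):
    # (nc + m*p) / (n + m): observed frequency smoothed toward the prior p
    return (nc + m * p) / (n + m)

p = 0.5                      # class prior
counts = [(9, 14), (6, 14)]  # (matching records, total records) per attribute
estimates = fromiter((m_estimate(nc, n, p) for nc, n in counts), float, count=len(counts))
print(p * multiply.reduce(estimates))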
Example #13
    def do(self, a, b):
        d = gula.det(a)
        s, ld = gula.slogdet(a)
        assert_almost_equal(s * np.exp(ld), d)

        if np.csingle == a.dtype.type or np.single == a.dtype.type:
            cmp_type = np.csingle
        else:
            cmp_type = np.cdouble

        ev = gula.eigvals(a.astype(cmp_type))
        assert_almost_equal(d.astype(cmp_type),
                            multiply.reduce(ev.astype(cmp_type),
                                            axis=(ev.ndim-1)))
        if s != 0:
            assert_almost_equal(np.abs(s), 1)
        else:
            assert_equal(ld, -inf)
Example #14
    def __init__(self, seq, parents_0, parents_1, this_index, weight):
        """
        @param seq: the data sequence
        @type seq: numpy array

        @param parents_0: indices of parent values in the previous slice
        @type parents_0: list of int

        @param parents_1: indices of parent values in the current slice
        @type parents_1: list of int

        @param this_index: index of the node value itself in the current slice
        @type this_index: int

        @param weight: sequence weight
        @type weight: float in [0, 1]
        """
        # data
        lng = len(seq)
        self.lng = lng
        self.seq = seq.flat
        # self index
        # Cache the indices
        indices = []
        # Step size
        if len(seq.shape) == 1:
            step = 1
        else:
            step = multiply.reduce(seq.shape[1:])
        for l in range(0, lng):
            index = []
            if l > 0:
                for pi in parents_0:
                    index.append((l - 1) * step + pi)
            for pi in parents_1:
                index.append(l * step + pi)
            index.append(l * step + this_index)
            indices.append(index)
        self.this_index = this_index
        self.step = step
        self.indices = indices
        self.weight = weight
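The `step = multiply.reduce(seq.shape[1:])` line computes how many flat elements one slice of `seq` occupies, so `l * step + i` addresses element `i` of slice `l` in the flattened view. A quick illustration:

from numpy import arange, multiply

seq = arange(24).reshape(4, 2, 3)      # 4 slices of 2x3 values
step = multiply.reduce(seq.shape[1:])  # 6 flat elements per slice
l, i = 2, 4
assert seq.flat[l * step + i] == seq.reshape(4, -1)[l, i]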
Example #15
def test_move_particle_one_over():
    """ Check the density is changed by a particle hopping left or right. """
    from numpy import nonzero, multiply
    from numpy.random import randint
    from numpy.testing import assert_equal

    mc = MonteCarlo()

    for i in range(
            100):  # Do this n times, to avoid issues with random numbers
        # Create density
        density = randint(50, size=randint(2, 6))
        # Change it
        new_density = mc.change_density(density)

        # Make sure any movement is by one
        indices = nonzero(density - new_density)[0]
        assert_equal(len(indices), 2, "densities differ in two places")
        assert_equal(multiply.reduce((density - new_density)[indices]), -1,
                     "densities differ by + and - 1")
Example #16
    def predict_proba(self, X):
        if (self.X is None) or (self.y is None):
            raise Exception("you must fit the model first")
        from numpy import exp, pi, multiply, ndarray
        """ERROR CHECKING"""
        assert all([(type(X) is ndarray),
                    (X.shape[-1] == self.X.shape[-1])]), "inconsistent array"

        # Gaussian density of every sample under every (class, feature) pair
        nd3D = (exp(-(self.X - self._mus) ** 2 / (self._sigmas ** 2 * 2))
                * (1 / (self._sigmas * ((pi * 2) ** 0.5))))
        nd2D = multiply.reduce(nd3D, axis=-1).T  # P(X|C) likelihoods
        # numerator of the Bayes formula (i.e. non-normalized posteriors)
        mx = nd2D * self._probabilities_of_classes
        # marginal totals = evidence = denominator of the Bayes formula (the normalizer)
        ndVerticalArray = mx.sum(axis=-1)[:, None]
        self._probabilities = mx / ndVerticalArray
        return self._probabilities
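Under the naive independence assumption, the joint likelihood of a sample is the product of its per-feature densities, which is exactly what `multiply.reduce(nd3D, axis=-1)` computes. A toy check:

from numpy import array, multiply

per_feature = array([[0.5, 0.2, 0.8],
                     [0.1, 0.9, 0.3]])
joint = multiply.reduce(per_feature, axis=-1)
print(joint)  # [0.08  0.027]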
Example #17
    def __init__(self, seq, parents_0, parents_1, this_index, weight):
        """
        @param seq: the data sequence
        @type seq: numpy array

        @param parents_0: indices of parent values in the previous slice
        @type parents_0: list of int

        @param parents_1: indices of parent values in the current slice
        @type parents_1: list of int

        @param this_index: index of the node value itself in the current slice
        @type this_index: int

        @param weight: sequence weight
        @type weight: float in [0, 1]
        """
        # data
        lng = len(seq)
        self.lng = lng
        self.seq = seq.flat
        # self index
        # Cache the indices
        indices = []
        # Step size
        if len(seq.shape) == 1:
            step = 1
        else:
            step = multiply.reduce(seq.shape[1:])
        for l in range(0, lng):
            index = []
            if l > 0:
                for pi in parents_0:
                    index.append((l - 1) * step + pi)
            for pi in parents_1:
                index.append(l * step + pi)
            index.append(l * step + this_index)
            indices.append(index)
        self.this_index = this_index
        self.step = step
        self.indices = indices
        self.weight = weight
Example #18
def pagerank(H):
    n = len(H)
    # Normalize each row to sum to 1; leave all-zero (dangling) rows alone
    for i in range(n):
        s = float(sum(H[i]))
        if s:
            H[i] = [x / s for x in H[i]]

    # Patch dangling nodes with a uniform outlink distribution
    w = zeros(n)
    rho = 1. / n * ones(n)
    for i in range(n):
        if multiply.reduce(H[i] == zeros(n)):
            w[i] = 1
    newH = H + outer(1. / n * w, ones(n))

    theta = 0.85
    G = theta * newH + (1 - theta) * outer(1. / n * ones(n), ones(n))
    print('initial pagerank scores:')
    print(rho)
    for j in range(10):
        rho = dot(rho, G)
        print('pagerank scores after %d iteration:' % (j + 1))
        print(rho)
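A quick usage sketch with a made-up three-node link matrix (row i lists the outlinks of page i; this assumes the module-level `from numpy import zeros, ones, outer, dot` that the snippet relies on):

from numpy import array

H = array([[0., 1., 1.],
           [1., 0., 0.],
           [0., 0., 0.]])   # node 2 is dangling (all-zero row)
pagerank(H)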
Example #19
def test_move_particle_one_over():
    """ Check the density is changed by a particle hopping left or right. """
    from numpy import nonzero, multiply
    from numpy.random import randint
    from numpy.testing import assert_equal

    mc = MonteCarlo()

    for i in range(100):  # Do this n times, to avoid issues with random numbers
        # Create density
        density = randint(50, size=randint(2, 6))
        # Change it
        new_density = mc.change_density(density)

        # Make sure any movement is by one
        indices = nonzero(density - new_density)[0]
        assert_equal(len(indices), 2, "densities differ in two places")
        assert_equal(
            multiply.reduce((density - new_density)[indices]),
            -1,
            "densities differ by + and - 1"
        )
Example #20
def probabilistic_and(p, axis=0):
    """
    Probabilistic version of AND
    """
    from numpy import array, multiply
    return multiply.reduce(array(p), axis=axis)
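For inputs in {0, 1} this reduces to ordinary AND; for probabilities of independent events it gives the probability that all of them occur:

from numpy import array, multiply

print(multiply.reduce(array([1, 1, 0])))    # 0: plain AND over booleans
print(multiply.reduce(array([0.9, 0.8])))   # 0.72: P(A and B) for independent A, B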
Example #21
def makepattern(smallprimes):
    from numpy import ones, bool_, multiply
    pattern = ones(multiply.reduce(smallprimes), dtype=bool_)
    pattern[0] = 0
    for p in smallprimes:
        pattern[p::p] = 0
    return pattern
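`makepattern` builds a sieve "wheel": a boolean array whose length is the product of the small primes, with every multiple of those primes already crossed off, so it can be tiled across a larger sieve. For example:

pattern = makepattern([2, 3, 5])
print(len(pattern))              # 30 == 2 * 3 * 5
print(pattern[:10].astype(int))  # [0 1 0 0 0 0 0 1 0 0]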
Example #22
def probabilistic_and(p, axis=0):
    """
    Probabilistic version of AND
    """
    from numpy import array, multiply
    return multiply.reduce(array(p), axis=axis)
Example #23
masks = [
    array([[1, 0, 0, 0],
           [1, 0, 0, 0],
           [1, 0, 0, 0],
           [1, 0, 0, 0]]),

    array([[1, 0, 0, 0],
           [0, 1, 0, 0],
           [0, 0, 1, 0],
           [0, 0, 0, 1]]),

    array([[0, 0, 0, 1],
           [0, 0, 1, 0],
           [0, 1, 0, 0],
           [1, 0, 0, 0]])
]

results = []

x, y = data.shape

for mask in masks:
    xm, ym = mask.shape

    for i in range(x-xm+1):
        for j in range(y-ym+1):
            tmp = (mask * data[i:i+xm, j:j+ym]).ravel()
            results.append(multiply.reduce(tmp[tmp > 0]))

print(max(results))
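The loop slides each mask across `data` (not shown above), multiplies the overlapped window elementwise, and records the product of the strictly positive survivors. A tiny self-contained run of one step, with a made-up grid:

from numpy import array, multiply

data = array([[1, 2],
              [3, 4]])
mask = array([[1, 0],
              [0, 1]])                # keep the main diagonal

tmp = (mask * data).ravel()           # [1, 0, 0, 4]
print(multiply.reduce(tmp[tmp > 0]))  # 1 * 4 = 4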