Example #1
def fuzzy_min(x, A, y, B):
    """
    Finds minimum between fuzzy set A in universe x and fuzzy set B in
    universe y.

    Parameters
    ----------
    x : 1d array, length N
        Universe variable for fuzzy set A.
    A : 1d array, length N
        Fuzzy set for universe x.
    y : 1d array, length M
        Universe variable for fuzzy set B.
    B : 1d array, length M
        Fuzzy set for universe y.

    Returns
    -------
    z : 1d array
        Output variable.
    mfz : 1d array
        Fuzzy membership set for variable z.

    Note
    ----
    Uses Zadeh's Extension Principle from Ross, Fuzzy Logic w/Engineering
    Applications, (2010), pp.414, Eq. 12.17.

    """
    # A and x, and B and y, are formed into (MxN) matrices.  The former pair
    # has identical columns; the latter pair has identical rows.
    N = len(B)
    AA = np.dot(np.atleast_2d(A).T, np.ones((1, N)))
    X = np.dot(np.atleast_2d(x).T, np.ones((1, N)))
    M = len(A)
    BB = np.dot(np.ones((M, 1)), np.atleast_2d(B))
    Y = np.dot(np.ones((M, 1)), np.atleast_2d(y))

    # Take the element-wise minimum
    Z = np.fmin(X, Y).ravel()
    Z_index = np.argsort(Z)
    Z = np.sort(Z)

    # Array min() operation
    C = np.fmin(AA, BB).ravel()
    C = C[Z_index]

    # Initialize loop
    z, mfz = np.empty(0), np.empty(0)
    idx = 0

    for i in range(len(C)):
        index = np.nonzero(Z == Z[idx])[0]
        z = np.hstack((z, Z[idx]))
        mfz = np.hstack((mfz, C[index].max()))
        if Z[idx] == Z.max():
            break
        idx = index.max() + 1

    return z, mfz
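A minimal usage sketch (assuming numpy is imported as np and fuzzy_min is defined as above); the membership grades follow Zadeh's extension principle:

import numpy as np

x = np.array([1, 2, 3]); A = np.array([0.2, 1.0, 0.5])  # fuzzy set A on universe x
y = np.array([2, 3, 4]); B = np.array([0.7, 1.0, 0.3])  # fuzzy set B on universe y

z, mfz = fuzzy_min(x, A, y, B)
print(z)    # universe of min(x, y): 1, 2, 3
print(mfz)  # membership grades: 0.2, 1.0, 0.5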
Example #2
def cartadd(x, y):
    """
    Cartesian addition of two fuzzy membership vectors; algebraic method.

    Parameters
    ----------
    x : 1D array or iterable
        First fuzzy membership vector, of length M.
    y : 1D array or iterable
        Second fuzzy membership vector, of length N.

    Returns
    -------
    z : 2D array
        Cartesian addition of x and y, of shape (M, N).

    """
    # Ensure rank-1 input
    x, y = np.asarray(x).ravel(), np.asarray(y).ravel()

    m, n = len(x), len(y)

    a = np.dot(np.atleast_2d(x).T, np.ones((1, n)))
    b = np.dot(np.ones((m, 1)), np.atleast_2d(y))

    return a + b
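For reference, a small usage sketch (assuming numpy is imported as np); the result is the same as np.add.outer(x, y):

import numpy as np

x = [0.2, 0.7]
y = [0.1, 0.5, 0.9]
z = cartadd(x, y)
# z[i, j] == x[i] + y[j], i.e. the outer sum of the two vectors
print(np.allclose(z, np.add.outer(x, y)))  # True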
Example #3
def cartprod(x, y):
    """
    Cartesian product of two fuzzy membership vectors. Uses `min()`.

    Parameters
    ----------
    x : 1D array or iterable
        First fuzzy membership vector, of length M.
    y : 1D array or iterable
        Second fuzzy membership vector, of length N.

    Returns
    -------
    z : 2D array
        Cartesian product of x and y, of shape (M, N).

    """
    # Ensure rank-1 input
    x, y = np.asarray(x).ravel(), np.asarray(y).ravel()

    m, n = len(x), len(y)

    a = np.dot(np.atleast_2d(x).T, np.ones((1, n)))
    b = np.dot(np.ones((m, 1)), np.atleast_2d(y))

    return np.fmin(a, b)
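Similarly, a small sketch for cartprod (assuming numpy is imported as np); for finite inputs it matches np.minimum.outer(x, y):

import numpy as np

x = [0.2, 0.7]
y = [0.1, 0.5, 0.9]
z = cartprod(x, y)
# z[i, j] == min(x[i], y[j]), the element-wise minimum over all pairs
print(np.allclose(z, np.minimum.outer(x, y)))  # True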
Example #4
    def rand(self):

        m, n = self.__m, self.__n

        s = linalg.cholesky(self.__prod).transpose()
        w = self.__weight

        # Compute the parameters of the posterior distribution.
        mu = linalg.solve(s[:m, :m], s[:m, m:])
        omega = np.dot(s[:m, :m].transpose(), s[:m, :m])
        sigma = np.dot(s[m:, m:].transpose(), s[m:, m:]) / w
        eta = w

        # Simulate the marginal Wishart distribution.
        f = linalg.solve(
            np.diag(np.sqrt(2.0 * random.gamma((eta - np.arange(n)) / 2.0))) + np.tril(random.randn(n, n), -1),
            np.sqrt(eta) * linalg.cholesky(sigma).transpose(),
        )
        b = np.dot(f.transpose(), f)

        # Simulate the conditional Gauss distribution.
        a = mu + linalg.solve(
            linalg.cholesky(omega).transpose(), np.dot(random.randn(m, n), linalg.cholesky(b).transpose())
        )

        return a, b
Example #5
def computeCost(theta, X, y, l=0):
    m = y.size
    term1 = np.dot(-np.array(y).T, np.log(h(theta, X)))
    term2 = np.dot((1 - np.array(y)).T, np.log(1 - h(theta, X)))
    regterm = (l / 2) * np.sum(np.dot(theta[1:].T, theta[1:]))
    J = float((1.0 / m) * (np.sum(term1 - term2) + regterm))
    return J
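computeCost relies on a hypothesis helper h(theta, X) that is not shown here; a minimal sketch under the usual logistic-regression convention (sigmoid of X.dot(theta)) would be:

import numpy as np

def h(theta, X):
    # Hypothetical sigmoid hypothesis assumed by computeCost above.
    return 1.0 / (1.0 + np.exp(-np.dot(X, theta)))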
Example #6
    def test_get_slab(self):
        s = self.get_structure("LiFePO4")
        gen = SlabGenerator(s, [0, 0, 1], 10, 10)
        s = gen.get_slab(0.25)
        self.assertAlmostEqual(s.lattice.abc[2], 20.820740000000001)

        fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"], [[0, 0, 0]])
        gen = SlabGenerator(fcc, [1, 1, 1], 10, 10)
        slab = gen.get_slab()
        gen = SlabGenerator(fcc, [1, 1, 1], 10, 10, primitive=False)
        slab_non_prim = gen.get_slab()
        self.assertEqual(len(slab), 6)
        self.assertEqual(len(slab_non_prim), len(slab) * 4)

        # Some randomized testing of cell vectors
        for i in range(1, 231):
            i = random.randint(1, 230)
            sg = SpaceGroup.from_int_number(i)
            if sg.crystal_system == "hexagonal" or (sg.crystal_system == "trigonal" and sg.symbol.endswith("H")):
                latt = Lattice.hexagonal(5, 10)
            else:
                # Cubic lattice is compatible with all other space groups.
                latt = Lattice.cubic(5)
            s = Structure.from_spacegroup(i, latt, ["H"], [[0, 0, 0]])
            miller = (0, 0, 0)
            while miller == (0, 0, 0):
                miller = (random.randint(0, 6), random.randint(0, 6), random.randint(0, 6))
            gen = SlabGenerator(s, miller, 10, 10)
            a, b, c = gen.oriented_unit_cell.lattice.matrix
            self.assertAlmostEqual(np.dot(a, gen._normal), 0)
            self.assertAlmostEqual(np.dot(b, gen._normal), 0)
Example #7
def log(T):
    (R, p) = breakTrans(T)
    if np.array_equiv(R, np.eye(3)):
        w = np.zeros((3, 1))
        theta = 1
        u = p

    else:
        tr = R.trace()
        if tr == -1:
            theta = -math.pi
            w = np.array([[sqrt((R[i][i] + 1) / 2) for i in range(3)]]).T
            W = bra(w)
        else:
            theta = acos((tr - 1) / 2)
            W = (1 / (2 * sin(theta))) * (R - R.T)
            w = ibra(W)

        W2 = np.dot(W, W)
        G1 = 1.0 / theta * np.eye(3)
        G2 = 0.5 * W
        G3 = (1.0 / theta - cot(theta / 2) / 2) * W2
        G = G1 + G2 + G3
        # This corrects the bug!!!!!
        G = G.T
        u = np.dot(G, p)

    h = np.dot(w.T, u)
    return (w, u, theta, h)
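log (like exp in Example #23) depends on helpers that are not shown; under the usual screw-theory conventions, bra would be the skew-symmetric (hat) operator and ibra its inverse, roughly:

import numpy as np

def bra(w):
    # Hat operator: 3-vector -> skew-symmetric matrix (assumed convention).
    wx, wy, wz = np.ravel(w)
    return np.array([[0.0, -wz, wy],
                     [wz, 0.0, -wx],
                     [-wy, wx, 0.0]])

def ibra(W):
    # Vee operator: inverse of bra for a skew-symmetric W (assumed convention).
    return np.array([[W[2, 1]], [W[0, 2]], [W[1, 0]]])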
Example #8
    def train(self, batchdata, nepochs, cdsteps=1, passthroughs=None):
        errors = N.empty(nepochs)
        numcases, numvis, numbatches = batchdata.shape
        for epoch in xrange(nepochs):
            vishidinc = N.zeros((self._numvis, self._numhid))
            hidbiasinc = N.zeros((self._numhid))
            visbiasinc = N.zeros((self._numvis))
            errsum = 0
            for batch in xrange(batchdata.shape[2]):
                data = batchdata[:, :, batch]
                if passthroughs:
                    for rbm in passthroughs:
                        data = rbm.passthrough(data)
                # Start of positive phase
                poshidprobs, poshidstates = self._hid_activate(self._vishid, self._hidbiases, data, meanfield=False)

                posprods = N.dot(data.T, poshidprobs)
                poshidact = N.sum(poshidprobs, axis=0)
                posvisact = N.sum(data, axis=0)

                # Start negative phase
                if cdsteps == 1:
                    negdata = self._vis_activate(self._vishid.T, self._visbiases, poshidstates, meanfield=True)
                    neghidprobs = self._hid_activate(self._vishid, self._hidbiases, negdata)[1]
                else:
                    neghidstates = poshidstates
                    for i in xrange(cdsteps):
                        negdata, negvisstates = self._vis_activate(self._vishid.T, self._visbiases, neghidstates)

                        neghidstates, neghidprobs = self._hid_activate(self._vishid, self._hidbiases, negvisstates)

                negprods = N.dot(negdata.T, neghidprobs)
                neghidact = N.sum(neghidprobs, axis=0)
                negvisact = N.sum(negdata, axis=0)

                # End of negative phase
                err = N.sum((data - negdata) ** 2)
                errsum = errsum + err
                if epoch + self._trainedfor > 5:
                    momentum = self._finalmomentum
                else:
                    momentum = self._initialmomentum

                # Updates

                vishidinc = momentum * vishidinc + self._epsilonw * (
                    (posprods - negprods) / numcases - self._weightcost * self._vishid
                )
                visbiasinc = momentum * visbiasinc + (self._epsilonvb / numcases) * (posvisact - negvisact)
                hidbiasinc = momentum * hidbiasinc + (self._epsilonhb / numcases) * (poshidact - neghidact)

                self._vishid += vishidinc
                self._visbiases += visbiasinc
                self._hidbiases += hidbiasinc
                if passthroughs:
                    del data
            print "epoch %5d, error = %10.5f" % (epoch + 1 + self._trainedfor, errsum)
            errors[epoch] = errsum
        self._trainedfor += nepochs
        return errors
Example #9
def plot_results(band, yatsm_config, yatsm_model, plot_type="TS"):
    step = -1 if yatsm_config["reverse"] else 1
    design = re.sub(r"[\+\-][\ ]+C\(.*\)", "", yatsm_config["design_matrix"])

    for i, r in enumerate(yatsm_model.record):
        label = "Model {i}".format(i=i)
        if plot_type == "TS":
            mx = np.arange(r["start"], r["end"], step)
            mX = patsy.dmatrix(design, {"x": mx}).T

            my = np.dot(r["coef"][:, band], mX)
            mx_date = np.array([dt.datetime.fromordinal(int(_x)) for _x in mx])

        elif plot_type == "DOY":
            yr_end = dt.datetime.fromordinal(r["end"]).year
            yr_start = dt.datetime.fromordinal(r["start"]).year
            yr_mid = int(yr_end - (yr_end - yr_start) / 2)

            mx = np.arange(dt.date(yr_mid, 1, 1).toordinal(), dt.date(yr_mid + 1, 1, 1).toordinal(), 1)
            mX = patsy.dmatrix(design, {"x": mx}).T

            my = np.dot(r["coef"][:, band], mX)
            mx_date = np.array([dt.datetime.fromordinal(d).timetuple().tm_yday for d in mx])

            label = "Model {i} - {yr}".format(i=i, yr=yr_mid)

        plt.plot(mx_date, my, lw=2, label=label)
        plt.legend()
Example #10
def incremental_SVD(X, K, num, by_row=True):
    """
  Takes a vector of k values!
  """
    if by_row:
        u, s, v = np.linalg.svd(X[:num, :])

        uk_list = []
        sk_list = []
        vk_list = []
        for k in K:
            uk = u[:, :k]
            sk = np.diag(s[:k])
            vk = v[:k, :]
            sk_inv = np.linalg.pinv(sk)

            for i in xrange(X.shape[0] - num):
                c = X[num + i, :]
                cp = np.dot(np.dot(c, vk.T), sk_inv)
                uk = np.vstack((uk, cp))

            uk_list.append(uk)
            sk_list.append(sk)
            vk_list.append(vk)

    return uk_list, sk_list, vk_list
Example #11
    def p_y_given_x(
        self, xi, tag
    ):  # given xi determine the probability of y - note: we have all the f(x, y) values for all y in the dataset
        # print 'TAGS = ', self.tag_set
        normalizer = 0.0
        feat = self.get_feats(xi, tag)
        # print "TAG = ", tag
        # print "Feats for ", tag, " = ", feat
        # for i in range(len(feat)):
        # print feat[i], self.model[i]

        dot_vector = numpy.dot(numpy.array(feat), self.model)
        for t in self.tag_set:
            feat = self.get_feats(xi, t)
            dp = numpy.dot(numpy.array(feat), self.model)
            if dp == 0:
                normalizer += 1.0
            else:
                normalizer += math.exp(dp)
        if dot_vector == 0:
            val = 1.0
        else:
            val = math.exp(dot_vector)  #
            # print 'dotv = ', dot_vector, ' norm = ', normalizer, ' val = ', val
        result = float(val) / normalizer
        return result
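The loop above is just a softmax over the tag set (the dp == 0 branches reproduce exp(0) = 1); a numerically stabilised, vectorised sketch of the same computation, using hypothetical names:

import numpy as np

def softmax_prob(feature_matrix, model, index):
    # feature_matrix holds one feature vector f(xi, t) per tag, one per row;
    # index is the row of the tag whose probability is wanted (hypothetical helper).
    scores = np.dot(np.asarray(feature_matrix), model)
    scores = scores - scores.max()  # stabilise the exponentials
    expd = np.exp(scores)
    return expd[index] / expd.sum()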
Example #12
    def test_calc_receive_filter(self):
        Pu = self.Pu
        noise_var = self.noise_var
        num_users = self.num_users
        # num_antenas = self.num_antenas
        channel = randn_c(self.iNr, self.iNt)

        (newH, _) = blockdiagonalization.block_diagonalize(channel, num_users, Pu, noise_var)

        # W_bd is a block diagonal matrix, where each "small block" is the
        # receive filter of one user.
        W_bd = blockdiagonalization.calc_receive_filter(newH)

        np.testing.assert_array_almost_equal(np.dot(W_bd, newH), np.eye(np.sum(self.iNt)))

        # Retest for each individual user
        W0 = W_bd[0:2, 0:2]
        newH0 = newH[0:2, 0:2]
        np.testing.assert_array_almost_equal(np.dot(W0, newH0), np.eye(self.iNt / 3))
        W1 = W_bd[2:4, 2:4]
        newH1 = newH[2:4, 2:4]
        np.testing.assert_array_almost_equal(np.dot(W1, newH1), np.eye(self.iNt / 3))
        W2 = W_bd[4:, 4:]
        newH2 = newH[4:, 4:]
        np.testing.assert_array_almost_equal(np.dot(W2, newH2), np.eye(self.iNt / 3))
Example #13
    def ResMatS(self, H, K, L, W, EXP):
        # [len,H,K,L,W,EXP]=CleanArgs(H,K,L,W,EXP);
        x = self.x
        y = self.y
        z = self.z
        Q = self.modvec(H, K, L, "latticestar")
        uq = N.zeros((3, self.npts), "d")
        # Unit vector along Q
        uq[0, :] = H / Q
        uq[1, :] = K / Q
        uq[2, :] = L / Q
        xq = self.scalar(x[0, :], x[1, :], x[2, :], uq[0, :], uq[1, :], uq[2, :], "latticestar")
        yq = self.scalar(y[0, :], y[1, :], y[2, :], uq[0, :], uq[1, :], uq[2, :], "latticestar")
        zq = 0
        # Scattering vector is assumed to lie in the (self.orient1, self.orient2) plane
        # Coordinate transformation matrix
        tmat = N.zeros((4, 4, self.npts))
        tmat[3, 3, :] = 1
        tmat[2, 2, :] = 1
        tmat[0, 0, :] = xq
        tmat[0, 1, :] = yq
        tmat[1, 1, :] = xq
        tmat[1, 0, :] = -yq

        RMS = N.zeros((4, 4, self.npts))
        rot = N.zeros((3, 3))
        EXProt = EXP

        # Sample shape matrix in the coordinate system defined by the scattering vector
        for i in range(self.npts):
            sample = EXP[i]["sample"]
            if "shape" in sample:
                rot[0, 0] = tmat[0, 0, i]
                rot[1, 0] = tmat[1, 0, i]
                rot[0, 1] = tmat[0, 1, i]
                rot[1, 1] = tmat[1, 1, i]
                rot[2, 2] = tmat[2, 2, i]
                EXProt[i]["sample"]["shape"] = N.dot(rot, N.dot(sample["shape"], rot.T))

        R0, RM = self.ResMat(Q, W, EXProt)

        for i in range(self.npts):
            RMS[:, :, i] = N.dot((tmat[:, :, i]).transpose(), N.dot(RM[:, :, i], tmat[:, :, i]))

        mul = N.zeros((4, 4))
        e = N.eye(4, 4)
        for i in range(self.npts):
            if "Smooth" in EXP[i]:
                if "X" in (EXP[i]["Smooth"]):
                    mul[0, 0] = 1.0 / (EXP[i]["Smooth"]["X"] ** 2 / 8 / N.log(2))
                    mul[1, 1] = 1.0 / (EXP[i]["Smooth"]["Y"] ** 2 / 8 / N.log(2))
                    mul[2, 2] = 1.0 / (EXP[i]["Smooth"]["E"] ** 2 / 8 / N.log(2))
                    mul[3, 3] = 1.0 / (EXP[i]["Smooth"]["Z"] ** 2 / 8 / N.log(2))
                    R0[i] = (
                        R0[i]
                        / N.sqrt(N.linalg.det(e / RMS[:, :, i]))
                        * N.sqrt(N.linalg.det(e / mul + e / RMS[:, :, i]))
                    )
                    RMS[:, :, i] = e / (e / mul + e / RMS[:, :, i])
        return R0, RMS
Example #14
def compute_cost_logistic(theta, X, y, regularise=False, num_classes=5):
    """Cost function for logistic regression.
    Returns negated cost and gradient, because 
    is to be optimised by a minimisation algorithm.
    Faster than compute_cost_logistic_safe."""
    global LAMBDA
    data_count, features_count = X.shape
    # Need to reshape, because optimisation algorithms flatten theta.
    theta = theta.reshape((num_classes, features_count))
    grad = np.zeros(shape=theta.shape)
    dot_theta_x = np.dot(theta, X.T)
    try:
        hypothesis = np.exp(dot_theta_x)
    except FloatingPointError:
        print "FloatingPointError, using compute_cost_logistic_safe function instead"
        return compute_cost_logistic_safe(theta, X, y, regularise, num_classes)
    # Compute probabilities matrix.
    probabilities = np.transpose(hypothesis / np.sum(hypothesis, axis=0))
    # Vector of actual probabilities, or the probabilities to use to calculate the loss
    actual_prob = np.array([probabilities[i][y[i]] for i in range(data_count)])
    loss = np.sum(np.log(actual_prob))  # Logistic loss
    for i in range(0, data_count):
        for k in range(0, num_classes):  # Update gradient for every class.
            grad[k] += np.dot(X[i], (identity(y[i], k) - probabilities[i][k]))
    if regularise:  # Ridge regularisation
        loss = LAMBDA * loss - (1 - LAMBDA) * np.sum(np.square(theta))
        grad = LAMBDA * grad - 2 * (1 - LAMBDA) * theta
    return [-loss, -grad.flatten()]
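compute_cost_logistic calls an indicator helper identity(y_i, k) that is not shown; presumably it is the class-membership indicator, something like:

def identity(label, k):
    # Hypothetical indicator: 1.0 if the example's label is class k, else 0.0.
    return 1.0 if label == k else 0.0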
Example #15
    def score(self, beta, Y, nuisance=None):
        """ Gradient of the loglikelihood function at (beta, Y, nuisance).

        The gradient of the loglikelihood function at (beta, Y, nuisance) is the
        score function.

        See :meth:`logL` for details.

        Parameters
        ----------
        beta : ndarray
            The parameter estimates.  Must be of length df_model.
        Y : ndarray
            The dependent variable.
        nuisance : dict, optional
            A dict with key 'sigma', which is an optional estimate of sigma. If
            None, defaults to its maximum likelihood estimate (with beta fixed)
            as ``sum((Y - X*beta)**2) / n``, where n=Y.shape[0], X=self.design.

        Returns
        -------
        The gradient of the loglikelihood function.
        """
        # This is overwriting an abstract method of LikelihoodModel
        X = self.wdesign
        wY = self.whiten(Y)
        r = wY - np.dot(X, beta)
        n = self.df_total
        if nuisance is None:
            SSE = (r ** 2).sum(0)
            sigmasq = SSE / n
        else:
            sigmasq = nuisance["sigma"]
        return np.dot(X.T, r) / sigmasq
Example #16
    def get_sig_app(self, sctx, eps_app_eng):
        sig_eng, D_mtx = self.get_corr_pred(sctx, eps_app_eng, 0, 0, 0)
        sig_tensor = map3d_sig_eng_to_mtx(sig_eng)
        # get the transformation matrix

        sctx.r_pnt = sctx.loc
        X_pnt = sctx.fets_eval.get_X_pnt(sctx)
        X, Y, Z = X_pnt

        L = sqrt(Y ** 2 + Z ** 2)
        if L <= 1e-8:
            sig_another_one = 0.0
            sa = 0.0
            ca = 0.0
        else:
            sa = Z / L
            ca = Y / L

            s_alpha = asin(Z / L)
            c_alpha = acos(Z / L)

            n = array([0, -sa, ca], dtype="float_")
            sig_another_one = tensordot(n, dot(n, sig_tensor), [0, 0])

        T = array([[1, 0, 0], [0, ca, sa], [0, -sa, ca]], dtype="float_")
        sig_rotated = dot(T, dot(sig_tensor, T.T))
        return sig_rotated.flatten()
Example #17
    def train(self, X, y, learning_rate=0.2, epochs=1):

        ####bias setting####
        temp = np.ones([X.shape[0], X.shape[1] + 1])
        temp[:, 0:-1] = X
        X = temp
        y = np.array(y)

        for k in range(epochs):
            i = k % len(X)
            a = [X[i]]

            #### feedforward ####
            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))  # a is the output

            #### last layer error gradient ####
            error = y[i] - a[-1]
            deltas = [error * self.activation_deriv(a[-1])]

            #### hidden layer error gradient ####
            for l in range(len(a) - 2, 0, -1):
                #                print self.weights
                deltas.append(np.dot(self.weights[l], deltas[-1]) * self.activation_deriv(a[l]))

            #### backpropagation ####
            for i in range(len(self.weights)):

                layer = np.array([a[len(self.weights) - i - 1]])
                delta = np.array([deltas[i]])

                self.weights[len(self.weights) - i - 1] += learning_rate * np.dot(layer.T, delta)
Example #18
 def backprop(self, x, y):
     """Return a tuple ``(nabla_b, nabla_w)`` representing the
     gradient for the cost function C_x.  ``nabla_b`` and
     ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
     to ``self.biases`` and ``self.weights``."""
     nabla_b = [np.zeros(b.shape) for b in self.biases]
     nabla_w = [np.zeros(w.shape) for w in self.weights]
     # feedforward
     activation = x
     activations = [x]  # list to store all the activations, layer by layer
     zs = []  # list to store all the z vectors, layer by layer
     for b, w in zip(self.biases, self.weights):
         z = np.dot(w, activation) + b
         zs.append(z)
         activation = sigmoid(z)
         activations.append(activation)
     # backward pass
     delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])
     nabla_b[-1] = delta
     nabla_w[-1] = np.dot(delta, activations[-2].transpose())
     # Note that the variable l in the loop below is used a little
     # differently to the notation in Chapter 2 of the book.  Here,
     # l = 1 means the last layer of neurons, l = 2 is the
     # second-last layer, and so on.  It's a renumbering of the
     # scheme in the book, used here to take advantage of the fact
     # that Python can use negative indices in lists.
     for l in xrange(2, self.num_layers):
         z = zs[-l]
         sp = sigmoid_prime(z)
         delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
         nabla_b[-l] = delta
         nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
     return (nabla_b, nabla_w)
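backprop assumes sigmoid and sigmoid_prime helpers defined elsewhere in the same module; a minimal sketch:

import numpy as np

def sigmoid(z):
    # Logistic activation used in the feedforward pass above.
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_prime(z):
    # Derivative of the logistic activation.
    return sigmoid(z) * (1.0 - sigmoid(z))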
Example #19
def rotation_matrix(a1, a2, b1, b2):
    """Returns a rotation matrix that rotates the vectors *a1* in the
    direction of *a2* and *b1* in the direction of *b2*.

    In the case that the angle between *a2* and *b2* is not the same
    as between *a1* and *b1*, a proper rotation matrix is still
    constructed by first rotating *b2* in the *b1*, *b2* plane.
    """
    a1 = np.asarray(a1, dtype=float) / np.linalg.norm(a1)
    b1 = np.asarray(b1, dtype=float) / np.linalg.norm(b1)
    c1 = np.cross(a1, b1)
    c1 /= np.linalg.norm(c1)  # clean out rounding errors...

    a2 = np.asarray(a2, dtype=float) / np.linalg.norm(a2)
    b2 = np.asarray(b2, dtype=float) / np.linalg.norm(b2)
    c2 = np.cross(a2, b2)
    c2 /= np.linalg.norm(c2)  # clean out rounding errors...

    # Calculate rotated *b2*
    theta = np.arccos(np.dot(a2, b2)) - np.arccos(np.dot(a1, b1))
    b3 = np.sin(theta) * a2 + np.cos(theta) * b2
    b3 /= np.linalg.norm(b3)  # clean out rounding errors...

    A1 = np.array([a1, b1, c1])
    A2 = np.array([a2, b3, c2])
    R = np.linalg.solve(A1, A2).T
    return R
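A quick sanity check (assuming numpy is imported as np and rotation_matrix is defined as above): for orthonormal input frames, R maps a1 onto a2 and b1 onto the (possibly adjusted) b2:

import numpy as np

a1, b1 = [1, 0, 0], [0, 1, 0]
a2, b2 = [0, 1, 0], [0, 0, 1]
R = rotation_matrix(a1, a2, b1, b2)
print(np.allclose(np.dot(R, a1), a2))  # True: a1 is rotated onto a2
print(np.allclose(np.dot(R, b1), b2))  # True here, since the two angles match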
Example #20
    def cost(self, params):
        self.model = params
        sum_sqr_params = sum([p * p for p in params])  # for regularization
        reg_term = 0.5 * self.reg * sum_sqr_params
        dot_vector = numpy.dot(self.dataset, self.model)

        empirical = numpy.sum(dot_vector)  # this is the emperical counts
        expected = 0.0

        for j in range((self.num_examples)):
            mysum = 0.0
            for tag in self.tag_set:  # get the jth example feature vector for each tag
                fx_yprime = self.all_data[tag][j]  # self.get_feats(self.h_tuples[j][0], tag)
                """
				dot_prod = 0.0
				for f in range(len(fx_yprime)):
					if fx_yprime[f] != 0:
						dot_prod += self.model[f]
				"""
                dot_prod = numpy.dot(fx_yprime, self.model)
                if dot_prod == 0:
                    mysum += 1.0
                else:
                    try:
                        mysum += math.exp(dot_prod)
                    except:
                        print "dot_prod = ", dot_prod, " tag = ", tag, " f = ", fx_yprime, " m = ", self.model
            expected += math.log(mysum)
        if (self.iteration % 100) == 0:
            print "Iteration = ", self.iteration, "Cost = ", (expected - empirical + reg_term)
        self.iteration += 1
        self.cost_value = expected - empirical + reg_term
        return expected - empirical + reg_term
Example #21
    def test_predict(self):
        y = tm.makeTimeSeries()
        x = tm.makeTimeDataFrame()
        model1 = ols(y=y, x=x)
        assert_series_equal(model1.predict(), model1.y_predict)
        assert_series_equal(model1.predict(x=x), model1.y_predict)
        assert_series_equal(model1.predict(beta=model1.beta), model1.y_predict)

        exog = x.copy()
        exog["intercept"] = 1.0
        rs = Series(np.dot(exog.values, model1.beta.values), x.index)
        assert_series_equal(model1.y_predict, rs)

        x2 = x.reindex(columns=x.columns[::-1])
        assert_series_equal(model1.predict(x=x2), model1.y_predict)

        x3 = x2 + 10
        pred3 = model1.predict(x=x3)
        x3["intercept"] = 1.0
        x3 = x3.reindex(columns=model1.beta.index)
        expected = Series(np.dot(x3.values, model1.beta.values), x3.index)
        assert_series_equal(expected, pred3)

        beta = Series(0.0, model1.beta.index)
        pred4 = model1.predict(beta=beta)
        assert_series_equal(Series(0.0, pred4.index), pred4)
Example #22
    def fit(self, X, y, **params):
        """
        Fit Ridge regression model

        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        self._set_params(**params)

        X = np.asanyarray(X, dtype=np.float)
        y = np.asanyarray(y, dtype=np.float)

        n_samples, n_features = X.shape

        X, y, Xmean, ymean = self._center_data(X, y)

        if n_samples > n_features:
            # w = inv(X^t X + alpha*Id) * X.T y
            self.coef_ = linalg.solve(np.dot(X.T, X) + self.alpha * np.eye(n_features), np.dot(X.T, y))
        else:
            # w = X.T * inv(X X^t + alpha*Id) y
            self.coef_ = np.dot(X.T, linalg.solve(np.dot(X, X.T) + self.alpha * np.eye(n_samples), y))

        self._set_intercept(Xmean, ymean)
        return self
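The two branches compute the same ridge solution via the primal and dual normal equations; a standalone sketch of that equivalence (independent of the class above):

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X, y, alpha = rng.randn(6, 3), rng.randn(6), 0.5

# Primal form: w = (X^T X + alpha*I)^-1 X^T y
w_primal = linalg.solve(np.dot(X.T, X) + alpha * np.eye(3), np.dot(X.T, y))
# Dual form:   w = X^T (X X^T + alpha*I)^-1 y
w_dual = np.dot(X.T, linalg.solve(np.dot(X, X.T) + alpha * np.eye(6), y))
print(np.allclose(w_primal, w_dual))  # True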
Example #23
def exp(x, theta):
    if x.shape[0] == 3 and x.shape[1] == 1:
        W = bra(x)
        I = np.eye(W.shape[0])
        W2 = np.dot(W, W)

        return I + sin(theta) * W + (1 - cos(theta)) * W2
    elif x.shape[0] == 6 and x.shape[1] == 1:
        (w, u) = breakScrew(x)

        W = bra(w)
        I = np.eye(W.shape[0])
        W2 = np.dot(W, W)

        wth = I + sin(theta) * W + (1 - cos(theta)) * W2

        uth_1 = theta * I
        uth_2 = (1 - cos(theta)) * W
        uth_3 = (theta - sin(theta)) * W2

        uth = np.dot(uth_1 + uth_2 + uth_3, u)

        E = np.concatenate((wth, uth), axis=1)
        E = np.concatenate((E, np.array([[0, 0, 0, 1]])), axis=0)

        return E
Example #24
 def obj_grad(ww):
     w = np.reshape(ww, (d, 1))
     z = np.dot(X, w)
     gez = derexpp(z)
     gl = lamtip1 * gez - DT
     grad = 2 * a * w + np.reshape(b, (d, 1)) + c * np.dot(XT, gl) / lam / math.exp(GAM)
     return grad[:, 0]
Example #25
    def train(self, category):
        training_x, training_y, validation_x, validation_y = self.SplitData(category)
        # Extreme Learning Machine
        # Simple enough for small data, can be scaled if you want. GPU scaling.
        best_prediction = +np.Inf
        neuron = [5, 8, 11, 14, 17, 20, 40, 60, 80]
        for no_neuron in neuron:
            W = np.random.rand(training_x.shape[1] + 1, no_neuron)
            X = np.concatenate((training_x, np.ones((training_x.shape[0], 1))), axis=1)
            Y = training_y
            H = self.mysigmoid(np.dot(X, W))
            B = np.dot(np.dot(np.linalg.inv(np.dot(H.T, H) + 0.0001 * np.eye(H.shape[1])), H.T), Y)

            X_val = np.concatenate((validation_x, np.ones((validation_x.shape[0], 1))), axis=1)
            Y_val = validation_y
            prediction_val = np.dot(self.mysigmoid(np.dot(X_val, W)), B)
            err = np.var((prediction_val - Y_val)) / np.var(Y_val)
            if err < best_prediction:
                B_best = B
                W_best = W
                best_prediction = err
                NO = no_neuron
                Yt_best = np.dot(self.mysigmoid(np.dot(X, W)), B)
                prediction_val_best = np.dot(self.mysigmoid(np.dot(X_val, W)), B)
        print "The number of neuron is %d, and best performance is %f" % (NO, best_prediction)
        return B_best, W_best, Y_val, prediction_val_best
Example #26
def affine_backward(dout, cache):
    """
  Computes the backward pass for an affine layer.

  Inputs:
  - dout: Upstream derivative, of shape (N, M)
  - cache: Tuple of:
    - X: Input data, of shape (N, d_1, ... d_k)
    - W: Weights, of shape (D, M)

  Returns a tuple of:
  - dx: Gradient with respect to X, of shape (N, d1, ..., d_k)
  - dw: Gradient with respect to W, of shape (D, M)
  - db: Gradient with respect to b, of shape (M,)
  """
    X, W, b = cache
    dx, dw, db = None, None, None
    #############################################################################
    # TODO: Implement the affine backward pass.                                 #
    #############################################################################
    # print X.shape, dout.shape, W.shape
    N = X.shape[0]
    Xr = X.reshape(N, -1)

    dx = np.dot(dout, W.T)
    dx = dx.reshape(X.shape)
    dw = np.dot(Xr.T, dout)
    db = np.sum(dout, axis=0)

    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################
    return dx, dw, db
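A small shape check for affine_backward (assuming it is defined as above and numpy is imported as np); the cache is the (X, W, b) tuple produced by the matching forward pass:

import numpy as np

N, D, M = 4, 5, 3
X, W, b = np.random.randn(N, D), np.random.randn(D, M), np.random.randn(M)
dout = np.random.randn(N, M)
dx, dw, db = affine_backward(dout, (X, W, b))
print(dx.shape == X.shape, dw.shape == W.shape, db.shape == b.shape)  # True True True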
Example #27
   def dq02abc(self, ohmega=0, *args):
       """
	The dqo transformation can be thought of in geometric terms as the projection of the three 
	separate sinusoidal phase quantities onto two axes rotating with the same angular velocity 
	as the sinusoidal phase quantities. The two axes are called the direct, or d, axis and the 
	quadrature, or q, axis. Not surprisingly the q-axis is at an angle of 90 degrees to 
	(in quadrature with) the direct axis.
	Inverse Park's transformation.
	The ohmega(rad/s) is the rotation speed of the rotating frame. 
	"""
       _T_dq02abc = array(
           [
               [cos(ohmega), sin(ohmega), 1],
               [cos(ohmega - 2.0 * pi / 3.0), sin(ohmega - 2.0 * pi / 3.0), 1],
               [cos(ohmega + 2.0 * pi / 3.0), sin(ohmega + 2.0 * pi / 3.0), 1],
           ]
       )
       if len(args) == 3:
           return dot(_T_dq02abc, array([[args[0]], [args[1]], [args[2]]]))
       elif len(args) == 1:
           return dot(_T_dq02abc, args[0])
       else:
           raise InputArgumentError(
               "The argument have to be the tree current component or" " an array with the tree current component!"
           )
Example #28
    def abc2dq0(self, ohmega=0, *args):
        """
		The dqo transformation can be thought of in geometric terms as the projection of the three 
		separate sinusoidal phase quantities onto two axes rotating with the same angular velocity 
		as the sinusoidal phase quantities. The two axes are called the direct, or d, axis and the 
		quadrature, or q, axis. Not surprisingly the q-axis is at an angle of 90 degrees to 
		(in quadrature with) the direct axis.
		Since actual stator variables either to be generated or to be measured are all in stationary a-b-c
		frame, frame transform should be executed in the control.  
		The most popular transform is between stationary a-b-c frame quantities to synchronously 
		rotating d-q quantities
		The ohmega(rad/s) is the rotation speed of the rotating frame. 		
		"""
        _T_abc2dq0 = (
            2.0
            / 3.0
            * array(
                [
                    [cos(ohmega), cos(ohmega - 2.0 * pi / 3.0), cos(ohmega + 2.0 * pi / 3.0)],
                    [sin(ohmega), sin(ohmega - 2.0 * pi / 3.0), sin(ohmega + 2.0 * pi / 3.0)],
                    [1.0 / 2.0, 1.0 / 2.0, 1.0 / 2.0],
                ]
            )
        )

        if len(args) == 3:
            return dot(_T_abc2dq0, array([[args[0]], [args[1]], [args[2]]]))
        elif len(args) == 1:
            return dot(_T_abc2dq0, args[0])
        else:
            raise InputArgumentError(
                "The argument have to be the tree current component or" " an array with the tree current component!"
            )
Example #29
def covLIN(hyp=None, x=None, z=None, der=None):
    """Linear Covariance function.
    The covariance function is parameterized as:
    k(x^p,x^q) = x^p' * x^q

    There are no hyperparameters:

    hyp = []
 
    Note that there is no bias or scale term; use covConst and covScale to add these

    """

    if hyp is None:  # report number of parameters
        return [0]
    n, m = x.shape

    if z == "diag":
        A = (x * x).sum(axis=1)
    elif z is None:
        A = np.dot(x, x.T) + np.eye(n) * 1e-16  # required for numerical accuracy
    else:  # compute covariance between data sets x and z
        A = np.dot(x, z.T)  # cross covariances

    if der:
        raise Exception("No derivative available in covLIN")

    return A
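A short usage sketch (assuming numpy is imported as np): with no hyperparameters, covLIN simply returns the Gram matrix of dot products, and z="diag" returns its diagonal:

import numpy as np

x = np.random.randn(4, 2)
K = covLIN(hyp=[], x=x)                # 4x4 Gram matrix x.dot(x.T) plus a tiny jitter
kdiag = covLIN(hyp=[], x=x, z="diag")  # squared norms of the rows of x
print(np.allclose(np.diag(K), kdiag))  # True (up to the 1e-16 jitter)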
Example #30
def test_embedding():
    in_dim = 10
    out_dim = 4
    batch = 24

    data = mx.sym.Variable("data")
    embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
    exe_test = embed.simple_bind(mx.cpu(), data=(batch,))
    arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
    grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
    np_data = np.random.randint(low=0, high=in_dim, size=batch)
    np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
    np_onehot = np.zeros((batch, in_dim))
    np_onehot[np.arange(batch), np_data] = 1.0
    # forward
    arg_map["data"][:] = np_data
    arg_map["embed_weight"][:] = np_weight
    exe_test.forward()
    assert reldiff(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, np_weight)) < 1e-6
    # backward
    np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
    grad = mx.nd.zeros(np_grad.shape)
    grad[:] = np_grad
    exe_test.backward([grad])
    assert reldiff(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, np_grad)) < 1e-6