Example #1
1
    def test_square_matrices_1(self):
        """Round-trip small square matrices through OP4 write/read in both
        ASCII and binary form, checking forms and values survive."""
        op4 = OP4()
        form1 = 1
        form2 = 2
        form3 = 2
        from numpy import matrix, ones, reshape, arange

        A1 = matrix(ones((3, 3), dtype="float64"))
        A2 = reshape(arange(9, dtype="float64"), (3, 3))
        A3 = matrix(ones((1, 1), dtype="float32"))
        matrices = {"A1": (form1, A1), "A2": (form2, A2), "A3": (form3, A3)}

        for is_binary, fname in [(False, "small_ascii.op4"), (True, "small_binary.op4")]:
            op4_filename = os.path.join(op4Path, fname)
            # BUG FIX: the loop variable was ignored and is_binary=False was
            # hard-coded, so the binary case was never actually exercised.
            op4.write_op4(op4_filename, matrices, name_order=None,
                          precision="default", is_binary=is_binary)
            matrices2 = op4.read_op4(op4_filename, precision="default")

            # (the original unpacked A1/A2 twice; once is enough)
            (form1b, A1b) = matrices2["A1"]
            (form2b, A2b) = matrices2["A2"]
            (form3b, A3b) = matrices2["A3"]
            self.assertEqual(form1, form1b)
            self.assertEqual(form2, form2b)
            self.assertEqual(form3, form3b)

            self.assertTrue(array_equal(A1, A1b))
            self.assertTrue(array_equal(A2, A2b))
            self.assertTrue(array_equal(A3, A3b))
            del A1b, A2b, A3b
            del form1b, form2b, form3b
Example #2
1
    def GetStoichiometry(self):
        """Build the observation stoichiometric system.

        Returns:
            cids         - a sorted list of pseudoisomer IDs
            S            - stoichiometric matrix (rows=pseudoisomers,
                           cols=observations)
            gibbs_values - a row vector of the dG0s
            anchored     - a row vector indicating which obs. are anchored

        Note: observation IDs are collected internally but are NOT part of
        the return value (the previous docstring wrongly listed obs_ids).
        """
        n = len(self.observations)  # number of observations

        cids = set()
        for obs in self.observations:
            cids.update(obs.sparse.keys())
        cids = sorted(cids)
        # map each cid to its row index once, instead of an O(n) list.index()
        # lookup inside the loop below
        cid_to_row = {cid: i for i, cid in enumerate(cids)}

        # stoichiometric matrix S (rows=pseudoisomers, cols=observations)
        S = np.matrix(np.zeros((len(cids), n)))
        gibbs_values = np.matrix(np.zeros((1, n)))
        anchored = np.matrix(np.zeros((1, n)))
        obs_ids = []
        for i_obs, obs in enumerate(self.observations):
            obs_ids.append(obs.obs_id)
            # .items() replaces the Python-2-only .iteritems()
            for cid, coeff in obs.sparse.items():
                S[cid_to_row[cid], i_obs] = coeff
            gibbs_values[0, i_obs] = obs.dG0
            if obs.anchored:
                anchored[0, i_obs] = 1

        return cids, S, gibbs_values, anchored
Example #3
0
 def calcPCA(self, data):
     """Project `data` (rows = samples, per the axis=0/rowvar=0 usage) onto
     its principal components, sorted by descending eigenvalue.

     Returns the transformed samples.  Unlike the previous version, the
     caller's array is no longer mutated in place when centering.
     """
     data = data - np.mean(data, axis=0)  # center WITHOUT mutating caller
     c = np.cov(data, rowvar=0)
     values, vectors = la.eig(c)
     # argsort replaces the tolist().index() scan, which was O(n^2) and
     # ambiguous when eigenvalues are duplicated
     order = np.argsort(values)[::-1]
     featureVector = vectors[:, order]
     return (np.matrix(featureVector) * np.matrix(data.T)).T
Example #4
0
def geodesic(C0, C1, alpha):
    """Return C0^(1/2) * (C0^(-1/2) C1 C0^(-1/2))^alpha * C0^(1/2):
    the point at position `alpha` on the matrix geodesic from C0 to C1."""
    c0_sqrt = matrix(sqrtm(C0))
    c0_isqrt = matrix(invsqrtm(C0))
    whitened = c0_isqrt * C1 * c0_isqrt
    whitened_pow = matrix(powm(whitened, alpha))
    return matrix(c0_sqrt * whitened_pow * c0_sqrt)
Example #5
0
def feedforward(X, Y, w1, w2):
    """Single forward pass of a two-layer sigmoid network.

    Returns (X, Y, f, z1, a1, loss): the reshaped inputs (X with bias entry
    appended), the network output f, the hidden activations, the hidden
    pre-activations, and the summed cross-entropy loss.
    """
    m, d_1 = w1.shape  # w1 carries an extra column for the constant input
    d = d_1 - 1
    k, m_check = w2.shape

    if m + 1 != m_check:
        raise AssertionError("Weight matrices improperly aligned")
    if X.shape not in ((d, 1), (1, d)):
        raise AssertionError("Problem with X and w1 matrix")
    if Y.shape not in ((k, 1), (1, k)):
        raise AssertionError("Problem with Y with w2 matrix")

    # force column-vector shapes; append the bias entry to X
    X = np.matrix(X).reshape(d, 1)
    X = np.matrix(np.append(X, [[1]], axis=0))
    Y = np.matrix(Y).reshape(k, 1)

    # hidden pre-activation, with an extra slot appended (m+1 entries)
    a1 = np.matrix(np.append(w1 * X, [[0]], axis=0))
    z1 = sigmoid(a1)

    # output layer, k x 1
    f = sigmoid(w2 * z1)

    # summed cross-entropy between targets and outputs
    loss = np.sum(-np.multiply(Y, np.log(f)) - np.multiply((1 - Y), np.log(1 - f)))

    return (X, Y, f, z1, a1, loss)
def svdUpdate(U, S, V, a, b):
    """
    Rank-1 update of an SVD: given `X = U * S * V^T`, return `U'`, `S'`, `V'`
    such that `[X + a * b^T] = U' * S' * V'^T` (truncated back to the
    original rank).

    `a` is (m, 1) and `b` is (n, 1), so svdUpdate can simulate incremental
    addition of one new document and/or term to an existing decomposition.
    """
    rank = U.shape[1]

    # project a onto U's column space; P is the normalized residual
    m = U.T * a
    p = a - U * m
    Ra = numpy.sqrt(p.T * p)
    assert float(Ra) > 1e-10
    P = p / float(Ra)

    # same construction on the V side
    n = V.T * b
    q = b - V * n
    Rb = numpy.sqrt(q.T * q)
    assert float(Rb) > 1e-10
    Q = q / float(Rb)

    # small (rank+1)-square core matrix whose SVD yields the update
    K = numpy.matrix(numpy.diag(list(numpy.diag(S)) + [0.0]))
    K = K + numpy.bmat([[m], [Ra]]) * numpy.bmat([[n], [Rb]]).T
    u, s, vt = numpy.linalg.svd(K, full_matrices=False)

    Up = numpy.bmat([[U, P]]) * numpy.matrix(u[:, :rank])
    Vp = numpy.bmat([[V, Q]]) * numpy.matrix(vt.T[:, :rank])
    Sp = numpy.matrix(numpy.diag(s[:rank]))
    return Up, Sp, Vp
Example #7
0
    def turn(self, dir):
        """
        Rotate the robot in place: update the heading vector, rotate the
        sprite, and send the matching command to the hardware link.

        Parameters
        -----------
        dir : {'R','L','B'}
            turn right, left or back respectively

        Notes
        -----
        The coordinate of the robot is still referring to the bottomleft corner
        """

        print "TURN ", dir
        if dir == "R":
            # clockwise 90-degree rotation matrix for the heading vector
            rotation = np.matrix("0 1;-1 0")
            self.img = pygame.transform.rotate(self.img, -90)
            comm.send("d")  # NOTE(review): presumably the link command for "right" -- confirm against the receiver
        elif dir == "L":
            # counter-clockwise 90-degree rotation
            rotation = np.matrix("0 -1;1 0")
            self.img = pygame.transform.rotate(self.img, 90)
            comm.send("a")
        elif dir == "B":
            # 180-degree rotation
            rotation = np.matrix("-1 0;0 -1")
            self.img = pygame.transform.rotate(self.img, 180)
            comm.send("s")
        # NOTE(review): `rotation` is unbound for any other `dir` value -- the
        # next line would raise NameError on invalid input.

        # apply the rotation to the heading and flatten back to a plain list
        self.direction = list(np.dot(rotation, np.transpose(np.matrix(self.direction))).A1)
        self.mark_explored()
        comm.sendCommand(self.get_info_to_bt())
def PhiMeasurementMatrices(m, d, pointSet, sampleSize, rescaled=False):
    """Return `sampleSize` randomized measurement matrices of size
    m x len(pointSet).

    Each matrix is (1/sqrt(m)) * coeffs * monValues, where the rows of
    `coeffs` are drawn from a zero-mean multivariate normal whose diagonal
    covariance is built from multinomial weights of the monomials.
    """
    n = len(pointSet[0])
    Mons = multiIndices(n, d)
    # rows are monomials, columns are the points
    monValues = np.matrix(evaluateMonomialsAtPoints(Mons, pointSet))

    # diagonal covariance of the random coefficients
    # (assumes the entries of Mons are pairwise distinct, as the original
    # mon1 == mon2 comparison did)
    cov = []
    for i, mon in enumerate(Mons):
        row = [0] * len(Mons)
        padded = mon.copy()
        padded.append(d - np.sum(padded))
        row[i] = multinomial(d, padded) / pow(2, d)
        cov.append(row)

    mean = np.zeros(len(Mons))
    Results = []
    for _ in range(sampleSize):
        coeffs = np.random.multivariate_normal(mean, cov, m)
        M = (1 / math.sqrt(m)) * np.matrix(coeffs) * monValues
        if rescaled:
            sF = computeScalingFactor(np.transpose(M) * M)
            M = math.sqrt(sF) * M
        Results.append(M)

    return Results
Example #9
0
def GetAnalytical(beta, K, O, observables):
    """Closed-form reference values for a set of harmonic oscillators.

    Returns:
        f_k_analytical        - dimensionless free energy of each oscillator
        Delta_f_ij_analytical - matrix of pairwise differences f[j] - f[i]
        A_k_analytical        - dict: observable -> per-oscillator expectation
        A_ij_analytical       - dict: observable -> pairwise differences
    """

    # For a harmonic oscillator with spring constant K,
    # x ~ Normal(x_0, sigma^2), where sigma = 1/sqrt(beta K)

    # Compute the absolute dimensionless free energies of each oscillator analytically.
    # f = - ln(sqrt((2 pi)/(beta K)) )
    print "Computing dimensionless free energies analytically..."

    sigma = (beta * K) ** -0.5
    f_k_analytical = -numpy.log(numpy.sqrt(2 * numpy.pi) * sigma)

    # broadcasting a row vector against its transpose gives the full
    # antisymmetric matrix of pairwise differences f[j] - f[i]
    Delta_f_ij_analytical = numpy.matrix(f_k_analytical) - numpy.matrix(f_k_analytical).transpose()

    A_k_analytical = dict()
    A_ij_analytical = dict()

    for observe in observables:
        if observe == "RMS displacement":
            A_k_analytical[observe] = sigma  # NOTE(review): this is sigma (an RMS value), though the original comment called it "mean square displacement" -- confirm which is intended
        if observe == "potential energy":
            A_k_analytical[observe] = 1 / (2 * beta) * numpy.ones(len(K), float)  # By equipartition
        if observe == "position":
            A_k_analytical[observe] = O  # observable is the position
        if observe == "position^2":
            A_k_analytical[observe] = (1 + beta * K * O ** 2) / (beta * K)  # observable is the position^2

        # pairwise differences A[j] - A[i] for this observable
        A_ij_analytical[observe] = A_k_analytical[observe] - numpy.transpose(numpy.matrix(A_k_analytical[observe]))

    return f_k_analytical, Delta_f_ij_analytical, A_k_analytical, A_ij_analytical
Example #10
0
    def set_cross_validation_sets(self):
        """Decides the partitions for different cross-validation sets
        and populates c_indices.

        c_indices becomes an (n_blocks x 2) integer array of [start, end)
        row ranges; the sentinel matrix [[-1, -1]] marks an invalid or
        disabled configuration.
        """

        if self.c_ratio > 1 or self.c_ratio <= 0:
            print("invalid c_ratio: ", self.c_ratio)
            self.c_indices = np.matrix("-1, -1")
            return

        block_size = int(self.n * self.c_ratio)  # idiomatic int(), not a C-style cast

        if block_size < 1:
            print("cross-validation block_size is less than 1: ", block_size)
            self.c_indices = np.matrix("-1, -1")
            return

        # number of blocks, rounding up when n is not divisible by block_size
        n_blocks = -(-self.n // block_size)

        # BUG FIX: np.empty defaults to float64; the float bounds later break
        # when used as slice indices in set_data().  Use an integer dtype.
        self.c_indices = np.empty([n_blocks, 2], dtype=int)

        self.c_indices[:, 0] = np.arange(0, n_blocks * block_size, block_size)
        self.c_indices[:, 1] = np.arange(block_size, n_blocks * block_size + 1, block_size)

        # the last block absorbs any remainder rows
        self.c_indices[-1, 1] = self.n
Example #11
0
    def set_data(self, c_index):
        """Populates train_X, train_Y, c_valid_X and c_valid_Y
        for cross-validation round c_index.

        When c_indices holds the [-1, -1] sentinel (cross-validation
        disabled), the whole data set becomes the training set.
        """

        if np.any(self.c_indices == -1):
            self.train_len = self.n
            self.cross_len = 0

            self.train_X = self.x
            self.train_Y = self.y

            self.c_valid_X = np.matrix("0, 0")
            self.c_valid_Y = np.matrix("0, 0")
            return

        # BUG FIX: c_indices may be stored as floats; slice bounds must be
        # plain ints or the row slicing below raises TypeError.
        lower = int(self.c_indices[c_index][0])
        upper = int(self.c_indices[c_index][1])

        self.cross_len = upper - lower
        self.train_len = self.n - self.cross_len

        # validation fold is the [lower, upper) row range
        self.c_valid_X = self.x[lower:upper, :]
        self.c_valid_Y = self.y[lower:upper, :]

        self.train_X = np.empty([self.train_len, self.m])
        self.train_Y = np.empty([self.train_len, 1])

        # training data = rows before the fold plus rows after it
        l_index = 0
        if lower >= 1:
            self.train_X[:lower, :] = self.x[:lower, :]
            self.train_Y[:lower, :] = self.y[:lower, :]
            l_index = lower
        if upper < self.n:
            self.train_X[l_index:, :] = self.x[upper : self.n, :]
            self.train_Y[l_index:, :] = self.y[upper : self.n, :]
def svd(new_pixel, terms):
    """Reconstruct a pixel block from its first `terms` singular components,
    rescale magnitudes into 0..255 and clamp.  Also updates the WIDTH and
    HEIGHT globals to the block's shape."""
    global WIDTH, HEIGHT

    new_pixel = np.matrix(new_pixel)
    U, s, V = np.linalg.svd(new_pixel, full_matrices=False)

    # truncated reconstruction: sum of the first `terms` rank-1 components
    X = np.matrix(0)
    for idx in range(terms):
        X = np.add(s[idx] * np.matrix(U[:, idx]) * np.matrix(V[idx, :]), X)

    # rescale magnitudes into the 0..255 range
    max_val = X.max()
    min_val = X.min()
    X = (np.abs(X) / (max_val - min_val)) * 255

    (WIDTH, HEIGHT) = X.shape
    for r in range(WIDTH):
        for c in range(HEIGHT):
            value = X[r, c]
            if value > 255:
                X[r, c] = 255
            elif value < 0:
                X[r, c] = 0
            # anything non-finite (e.g. NaN from a zero max-min range) fails
            # both comparisons above AND the range test below, and is zeroed
            if X[r, c] >= 0 and X[r, c] <= 255:
                pass
            else:
                X[r, c] = 0

    X = np.array(X)
    (WIDTH, HEIGHT) = X.shape
    return copy.deepcopy(X)
Example #13
0
    def fundamental_matrix(self):
        """
        Return the fundamental matrix Z = (I - P + A)^-1, where P is the
        transition matrix and every row of A is the stationary distribution.
        The result is computed once and cached on the instance.

        .. seealso::

           Kemeny J. G.; Snell, J. L.
           Finite Markov Chains.
           Springer-Verlag: New York, 1976.

        >>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
        >>> T.fundamental_matrix()
        {('B', 'A'): 0.17751479289940991, ('A', 'B'): 0.053254437869822958,
        ('A', 'A'): 0.94674556213017902, ('B', 'B'): 0.82248520710059214}
        """
        try:
            # cached result from a previous call
            return self._fundamental_matrix
        except AttributeError:
            el2pos, pos2el = self._el2pos_()
            p = self.steady()._toarray(el2pos)  # stationary distribution
            P = self._numpy_mat(el2pos)  # transition matrix as numpy matrix
            d = len(p)
            # A: every row equals the stationary distribution p
            A = numpy.matrix([p for i in range(d)])
            I = numpy.matrix(numpy.identity(d))
            E = numpy.matrix(numpy.ones((d, d)))  # NOTE(review): E and D are built but never used below
            D = numpy.zeros((d, d))
            diag = 1.0 / p
            for pos, val in enumerate(diag):
                D[pos, pos] = val
            # the fundamental matrix itself
            Z = numpy.linalg.inv(I - P + A)
            res = Matrix()
            res._from_numpy_mat(Z, pos2el)
            self._fundamental_matrix = res
            return res
Example #14
0
    def simul(self, nbIte, epsilon, pi0):
        """Iterate the chain from pi0, recording the convergence gap.

        At each step pi_{t+1} = graph.nextStep(pi_t); the largest absolute
        componentwise change is appended to self.liste_epsilon, and iteration
        stops early once that gap drops below `epsilon` (or after `nbIte`
        steps).  (Docstring translated from the original French.)
        """
        self.res = np.copy(pi0)

        for _ in range(nbIte):
            previous = np.copy(self.res)
            self.res = self.graph.nextStep(np.matrix(self.res))

            # largest componentwise change between consecutive iterates;
            # np.abs replaces the original hand-rolled element-by-element
            # abs_mat() helper
            eps = float(np.abs(previous - self.res).max())

            self.liste_epsilon.append(eps)
            if eps < epsilon:
                break
Example #15
0
def genTestSetLinear(state0=None, length=100, noise=0):
    """Generate a linear-dynamics trajectory x_{t+1} = A x_t + noise * N(0,1).

    state0: optional initial state (any 2-element form); defaults to
            [0, 1]^T.  The previous version silently ignored this parameter.
    Returns the list of states (length + 1 entries, starting with state0).
    """
    if state0 is None:
        state0 = numpy.matrix([0, 1]).T
    else:
        state0 = numpy.matrix(state0).reshape(2, 1)
    state = [state0]
    update = numpy.matrix([[1, 0.1], [0, 0.9]])

    for t in range(length):
        # BUG FIX: the original advanced from state[t - 1], which for t >= 1
        # re-used a stale state; the recurrence must use the latest state[t].
        state.append(update * state[t] + noise * numpy.random.normal(0, 1, 1))
    return state
def zerniketoHexapod(z05x, z05y, z06x, z06y, z07d, z08d):
    """Map Zernike coefficients to hexapod offsets (dx, dy, xt, yt).

    Inverse of hexapodtoZernike2, using the 2012-10-20 calibration matrix.
    """
    # latest calibration matrix
    calibration_20121020 = np.matrix(
        np.array(
            (
                (0.00e00, 1.07e05, 4.54e03, 0.00e00),
                (1.18e05, -0.00e00, 0.00e00, -4.20e03),
                (-4.36e04, 0.00e00, 0.00e00, -8.20e01),
                (0.00e00, 4.42e04, -8.10e01, 0.00e00),
            )
        )
    )

    # z05y ~ -z06x and z05x ~ z06y, so average the paired coefficients
    zern5_theta_x = np.mean([z05x, z06y])
    zern6_theta_x = np.mean([-z05y, z06x])
    zern_col = np.matrix((zern5_theta_x, zern6_theta_x, z07d, z08d)).transpose()

    result = (calibration_20121020 * zern_col).A
    return result[0][0], result[1][0], result[2][0], result[3][0]
    def fit(self, x_train, y_train):
        """Estimate per-class priors, mean vectors and covariance matrices.
        Note: x_train must be 2d array, y_train must be 1d array
        """
        # infer the number of classes; labels are remapped to 0..num_class-1
        self.num_class, y_train, self.label_map = QDF.transform_label(y_train)

        # bucket the sample rows by (remapped) class index
        per_class = [[] for _ in range(self.num_class)]
        for row, label in zip(x_train, y_train):
            per_class[int(label)].append([row])

        self.mean = []
        self.cov_matrix = []
        self.prior = []
        for bucket in per_class:
            samples = np.matrix(np.concatenate(bucket, axis=0), dtype=np.float64)
            self.mean.append(samples.mean(0).T)
            # np.cov treats each row as one feature, hence the transpose
            self.cov_matrix.append(np.matrix(np.cov(samples.T)))
            # uniform prior (the sample-frequency prior stays disabled,
            # exactly as in the original)
            self.prior.append(1)

        return self
Example #18
0
File: mf.py Project: staticor/ml
    def train(self, ratings, model_path):
        """Train the matrix-factorization model with SGD.

        ratings: rating container exposing .mean, .rows, .cols and .kv_dict.
        model_path: directory where per-iteration model dumps are written.

        NOTE(review): Python 2 only -- random.shuffle() is applied to the
        list returned by dict.items(), and `k` relies on integer division.
        """

        self.mu = ratings.mean
        # factor matrices and bias vectors, small random initialization
        self.P = 0.001 * np.matrix(np.random.randn(len(ratings.rows), self.num_factor))
        self.bu = 0.001 * np.matrix(np.random.randn(len(ratings.rows), 1))
        self.Q = 0.001 * np.matrix(np.random.randn(len(ratings.cols), self.num_factor))
        self.bi = 0.001 * np.matrix(np.random.randn(len(ratings.cols), 1))

        self.rows = dict(ratings.rows)
        self.cols = dict(ratings.cols)

        # hold out 1/validate of the samples for validation, if requested
        if self.validate > 0:
            T = ratings.kv_dict.items()
            random.shuffle(T)
            k = len(T) / self.validate
            self.L_validate = T[0:k]
            self.L_train = T[k:]
        else:
            self.L_train = ratings.kv_dict.items()

        rmse_train = [0.0] * self.max_iter
        rmse_validate = [0.0] * self.max_iter

        for s in range(self.max_iter):

            random.shuffle(self.L_train)
            self.current_sample = 0
            self.sqr_err = 0.0

            # NOTE(review): each thread is started and then immediately
            # joined, so these "parallel" SGD workers actually run one after
            # another -- confirm whether all start() calls should happen
            # before any join().
            self.threads = [ParallelSGD("Thread_%d" % n, self) for n in range(self.num_thread)]

            start = time.time()
            for t in self.threads:
                t.start()
                t.join()
            terminal = time.time()

            duration = terminal - start

            rmse_train[s] = math.sqrt(self.sqr_err / len(ratings.kv_dict))

            if self.validate > 0:
                m = SparseMatrix()
                m.kv_dict = {k: v for (k, v) in self.L_validate}
                rmse_validate[s] = float(self.test(m))

            sys.stderr.write("Iter: %4.4i" % (s + 1))
            sys.stderr.write("\t[Train RMSE] = %f" % rmse_train[s])
            if self.validate > 0:
                sys.stderr.write("\t[Validate RMSE] = %f" % rmse_validate[s])
            sys.stderr.write("\t[Duration] = %f" % duration)
            sys.stderr.write("\t[Samples] = %d\n" % len(self.L_train))

            # checkpoint the model after every iteration
            self.dump_model(model_path + "/" + "model_%4.4i" % (s + 1))
            self.dump_raw_model(model_path + "/" + "model_%4.4i.raw_model" % (s + 1))

        plt.subplot(111)
        plt.plot(range(self.max_iter), rmse_train, "-og")
        plt.plot(range(self.max_iter), rmse_validate, "-xb")
        plt.show()
Example #19
0
 def test_block_diag_1(self):
     """ block_diag with one matrix """
     assert_equal(construct.block_diag([[1, 0]]).todense(), matrix([[1, 0]]))
     assert_equal(construct.block_diag([[[1, 0]]]).todense(), matrix([[1, 0]]))
     assert_equal(construct.block_diag([[[1], [0]]]).todense(), matrix([[1], [0]]))
     # just on scalar
     assert_equal(construct.block_diag([1]).todense(), matrix([[1]]))
Example #20
0
def funcd(params, brf, SB, gps):
    """Return (0.5 * summed squared residual, transposed Jacobian-residual
    product) for the reconstruction against the observed brf."""
    forward, derivs = reconstructD(gps, SB, params)
    residual = forward - brf
    jac_term = np.matrix(residual) * np.matrix(derivs).T
    half_sse = 0.5 * np.sum(residual * residual)
    return half_sse, jac_term.T
Example #21
0
    def __init__(self, train_X, train_Y, c_ratio):
        """Set up the learner's common state.

        Prediction-specific variables belong in the derived class's
        __init__(); this base constructor only:

        1. prepends a bias (all-ones) column to X,
        2. orients y as a column matrix, and
        3. sets up the cross-validation partitions.
        """
        rows, cols = train_X.shape[0], train_X.shape[1] + 1
        self.x = np.ones([rows, cols])
        self.x[:, 1:] = np.matrix(train_X)
        self.y = np.matrix(train_Y)

        # ensure y is a column vector (n x 1)
        if self.y.shape[1] != 1 or self.y.shape[1] > self.y.shape[0]:
            self.y = self.y.T

        self.n, self.m = self.x.shape

        self.train_len = 0
        self.cross_len = 0
        self.c_ratio = c_ratio

        self.train_X, self.train_Y = [], []
        self.c_valid_X, self.c_valid_Y = [], []

        self.c_indices = []

        self.set_cross_validation_sets()
Example #22
0
def riemann_mean(covmats, tol=10e-9, maxiter=50):
    """Iteratively estimate the geometric mean of a stack of covariance
    matrices: whiten each matrix by the current mean, average the matrix
    logs, and step along the matrix exponential of that average.

    covmats: array of shape (Nt, Ne, Ne).
    Returns the mean covariance matrix.

    NOTE(review): Python 2 code (print statement below).
    """
    # init
    Nt, Ne, Ne = covmats.shape
    C = numpy.mean(covmats, axis=0)  # arithmetic mean as the starting point
    k = 0
    J = eye(2)  # NOTE(review): initialized 2x2 regardless of Ne; only its norm seeds `crit` before J is overwritten
    nu = 1.0  # step size
    tau = 10e19  # best (smallest) accepted step criterion so far
    crit = norm(J, ord="fro")
    # stop when J<10^-9 or max iteration = 50
    while (crit > tol) and (k < maxiter) and (nu > tol):
        k = k + 1
        C12 = sqrtm(C)
        Cm12 = invsqrtm(C)
        T = zeros((Ne, Ne))

        # accumulate the matrix logs of the whitened covariances
        for index in range(Nt):
            tmp = numpy.dot(numpy.dot(Cm12, covmats[index, :, :]), Cm12)
            T += logm(matrix(tmp))

        # J = mean(T,axis=0)
        J = T / Nt
        crit = norm(J, ord="fro")
        h = nu * crit
        if h < tau:
            # accept the step and shrink the step size slightly
            C = matrix(C12 * expm(nu * J) * C12)
            nu = 0.95 * nu
            tau = h
        else:
            # reject: halve the step size and retry
            print "bad"
            nu = 0.5 * nu

    return C
Example #23
0
File: mf.py Project: staticor/ml
    def test(self, ratings):
        """Compute the RMSE of the model's predictions on `ratings`.

        NOTE(review): Python 2 only -- dict .keys()/.values() are used as
        aligned index lists; in Python 3 these are views and this indexing
        fails.
        """

        # per-sample factor rows and bias terms, zero for unknown users/items
        U = np.matrix(np.zeros([len(ratings), self.num_factor]))
        V = np.matrix(np.zeros([len(ratings), self.num_factor]))
        b = np.matrix(np.zeros([len(ratings), 1]))

        # sample index -> user id / item id, only for ids the model knows
        u_kv = dict()
        v_kv = dict()

        for s, (u, i) in enumerate(ratings.kv_dict):
            if u < len(self.P):
                u_kv[s] = u
            if i < len(self.Q):
                v_kv[s] = i

        # gather the known factors and biases into the per-sample matrices
        U[u_kv.keys()] = self.P[u_kv.values()]
        V[v_kv.keys()] = self.Q[v_kv.values()]

        b[u_kv.keys()] += self.bu[u_kv.values()]
        b[v_kv.keys()] += self.bi[v_kv.values()]

        # prediction: mu + biases + <P_u, Q_i>; RMSE over all samples
        R = np.matrix(ratings.kv_dict.values()).T
        err = R - (np.multiply(U, V).sum(1) + b + self.mu)
        rmse = math.sqrt(err.T * err / len(ratings))

        return rmse
Example #24
0
    def stdNorm(self, U1, U2):
        """Whiten (standard-normalize) the two sample sets U1 and U2.

        Each input is transposed to columns-as-samples, mean-centered, and
        multiplied by the inverse square root of its covariance, so the
        returned snU1/snU2 have (approximately) identity covariance.

        NOTE(review): Python 2 only (print statements); the prints look like
        debugging output left in place.
        """
        print "U1"
        print U1
        print "U2"
        print U2

        # columns are samples after the transpose
        mat1 = np.matrix(U1).T
        print mat1
        print mat1.mean(axis=1)
        mat1 = mat1 - mat1.mean(axis=1)  # center each row (feature)
        print mat1
        mat1cov = np.cov(mat1)
        print mat1cov
        # eigendecomposition of the covariance via SVD
        p1, l1, p1t = NLA.svd(mat1cov)
        print p1
        print l1
        print p1t
        # cov^(-1/2): scale by 1/sqrt(singular values), rotate by p1.T
        l1sq = SLA.sqrtm(SLA.inv(np.diag(l1)))
        snU1 = np.dot(np.dot(l1sq, p1.T), mat1)

        # same whitening applied to the second set
        mat2 = np.matrix(U2).T
        mat2 = mat2 - mat2.mean(axis=1)
        mat2cov = np.cov(mat2)
        p2, l2, p2t = NLA.svd(mat2cov)
        l2sq = SLA.sqrtm(SLA.inv(np.diag(l2)))
        snU2 = np.dot(np.dot(l2sq, p2.T), mat2)

        # sanity output: both should be close to identity
        print "cov:"
        print np.cov(snU1)
        print np.cov(snU2)

        return snU1, snU2
Example #25
0
def spin1(**kw):
    """
    Spin matrices for a spin-1 system.

    Keyword arguments:
        h    - value of h_bar (default 1)
        axis - 'x', 'y' or 'z' to return that single spin matrix; any other
               value (or omitted) returns the list [Sx, Sy, Sz].
    """
    # BUG FIX: the original popped "h" twice -- the second pop always saw the
    # key already removed and returned False, leaving h as the bool True
    # (numerically 1) whenever "h" was not supplied.  A single pop with a
    # numeric default is equivalent and clear.
    h = kw.pop("h", 1)

    axis = kw.pop("axis", None)
    a = h / numpy.sqrt(2)
    Sx = matrix([[0, a, 0], [a, 0, a], [0, a, 0]])
    Sy = matrix([[0, -1j * a, 0], [1j * a, 0, -1j * a], [0, 1j * a, 0]])
    Sz = matrix([[h, 0, 0], [0, 0, 0], [0, 0, -h]])

    # flat dispatch instead of the original nested else-if pyramid
    if axis == "x":
        return Sx
    if axis == "y":
        return Sy
    if axis == "z":
        return Sz
    return [Sx, Sy, Sz]
Example #26
0
    def solve(self, max_steps=50):
        """Iterative constraint solver.

        Repeatedly builds the Jacobian of the constraint errors with respect
        to the variables and applies the pseudo-inverse correction, until
        every error falls below its constraint's epsilon.

        Returns True on convergence within `max_steps`, False otherwise.
        """
        variables_map = {}
        variables = []
        epsilons = []

        for constraint in self._constraints:
            for var in constraint.variables():
                # BUG FIX: variables shared by several constraints (or listed
                # twice) were appended repeatedly, producing duplicate
                # Jacobian columns while the map only pointed at the last
                # copy.  Register each variable exactly once.  (A leftover
                # debug print of constraint.diff(var) was also removed, along
                # with the unused `pds` dict.)
                if var not in variables_map:
                    variables_map[var] = len(variables)
                    variables.append(var)
            epsilons.append(constraint.get_epsilon())

        for _ in range(max_steps):
            errors = [constraint.get_error() for constraint in self._constraints]

            if all(abs(error) < epsilon for error, epsilon in zip(errors, epsilons)):
                return True

            # assemble the Jacobian of errors w.r.t. variables
            jacobian = numpy.matrix(numpy.zeros(shape=(len(self._constraints), len(variables))))
            for constraint_id, constraint in enumerate(self._constraints):
                for var, pd in constraint.error_diff_values().items():
                    var_id = variables_map[var]
                    jacobian[constraint_id, var_id] += pd

            # least-squares correction step
            corrections = numpy.linalg.pinv(jacobian) * numpy.matrix(errors).T

            for var, correction in zip(variables, corrections.flat):
                var.update_value(-correction)

        return False
Example #27
0
    def computeNextCom(self, p0, x0=None, t=0.05):
        """Compute the COM state at time t from pivot p0 and initial state
        x0 = [[c_x, dc_x], [c_y, dc_y]] (defaults to all zeros).

        Useful for MPC implementations.  Valid for
        t < durrationOfStep * (1 - alpha) -- not checked here (TODO, as in
        the original).

        Returns [c_x, c_y, d_c_x, d_c_y].
        """
        # BUG FIX (hygiene): the mutable default argument [[0,0],[0,0]] is
        # replaced by a None sentinel so callers can never share state.
        if x0 is None:
            x0 = [[0, 0], [0, 0]]
        px = p0[0]
        py = p0[1]

        # pendulum frequency w = sqrt(g / h)
        w = np.sqrt(self.g / self.h)

        # initial positions and velocities (the original packed these into
        # throwaway matrices; plain reads are equivalent)
        c0_x, d_c0_x = x0[0][0], x0[0][1]
        c0_y, d_c0_y = x0[1][0], x0[1][1]

        # closed-form solution about the pivot (px, py)
        c_x = (c0_x - px) * np.cosh(w * t) + (d_c0_x / w) * np.sinh(w * t) + px
        d_c_x = w * (c0_x - px) * np.sinh(w * t) + d_c0_x * np.cosh(w * t)
        c_y = (c0_y - py) * np.cosh(w * t) + (d_c0_y / w) * np.sinh(w * t) + py
        d_c_y = w * (c0_y - py) * np.sinh(w * t) + d_c0_y * np.cosh(w * t)

        return [c_x, c_y, d_c_x, d_c_y]
Example #28
0
def order_components(A, C):
    """Order components based on their maximum temporal value and size

    Parameters
    -----------
    A:   sparse matrix (d x K)
         spatial components
    C:   matrix or np.ndarray (K x T)
         temporal components

    Returns
    -------
    A_or:  np.ndarray
        ordered spatial components
    C_or:  np.ndarray
        ordered temporal components
    srt:   np.ndarray
        sorting mapping
    """
    dense_A = np.array(A.todense())

    # per-component L2 norms of the spatial footprints
    norms_l2 = np.sqrt(np.sum(dense_A ** 2, axis=0))
    K = len(norms_l2)

    # normalize spatial components and push the scale into C
    dense_A = np.array(np.matrix(dense_A) * spdiags(1 / norms_l2, 0, K, K))
    norms_l4 = np.sum(dense_A ** 4, axis=0) ** 0.25
    scaled_C = np.array(spdiags(norms_l2, 0, K, K) * np.matrix(C))

    # rank by (size measure) * (peak temporal value), descending
    peak_C = np.ndarray.max(np.array(scaled_C), axis=1)
    srt = np.argsort(norms_l4 * peak_C)[::-1]

    # undo the normalization in the sorted order
    A_or = dense_A[:, srt] * spdiags(norms_l2[srt], 0, K, K)
    C_or = spdiags(1.0 / norms_l2[srt], 0, K, K) * (scaled_C[srt, :])

    return A_or, C_or, srt
Example #29
0
def NN(X, Y, w1_guess, w2_guess, thresh, learning_rate, lamb):
    """Train a two-layer network by per-sample gradient descent until the
    mean loss drops below `thresh` (or 100 epochs elapse).

    Returns the trained weight matrices (w1, w2).
    """
    X = np.matrix(X)
    Y = np.matrix(Y)
    d, n = X.shape
    k, n_check = Y.shape
    num_hidden = w1_guess.shape[0]

    # shape validation (error strings kept identical to the original)
    if n != n_check:
        raise AssertionError("X and Y shape doesn't match")
    if w1_guess.shape != (num_hidden, d + 1):
        raise AssertionError("w1_guess is not " + str(num_hidden) + ", " + str(d))
    if w2_guess.shape != (k, num_hidden + 1):
        raise AssertionError("w2_guess not specified properly")

    w1, w2 = w1_guess, w2_guess
    loss = 1
    epochs = 0
    while loss > thresh and epochs < 100:
        epochs += 1
        total = 0
        for sample in range(n):
            x = X.T[sample]
            y = Y.T[sample]
            grad1, grad2 = backprop(x, y, w1, w2, lamb)
            w1 = w1 - learning_rate * grad1
            w2 = w2 - learning_rate * grad2
            total += feedforward(x, y, w1, w2)[5]
        loss = total / n
    return (w1, w2)
Example #30
0
 def __init__(self, hmmdict):
     """Build HMM lookup tables from a spec dict with keys 'hidden',
     'observables', 'pi', 'transitions' and 'emissions'."""
     self.d = hmmdict
     # map state / observable names to their row and column indices
     self.states = {name: idx for idx, name in enumerate(self.d["hidden"])}
     self.obs = {name: idx for idx, name in enumerate(self.d["observables"])}
     self.pi = self.d["pi"]
     self.trans = matrix(self.makenested(self.d["transitions"], 3))
     self.emi = matrix(self.makenested(self.d["emissions"], 3))