def eval_V(self, t, x):
     if isinstance(t, (float, np.float64)):
         V = self.V[self.t_to_ind(t)]
         add_fun_const = self.c_add_fun_list[self.t_to_ind(t)]
     else:
         # t may also be a (V, add_fun_const) pair instead of a time point
         V, add_fun_const = t
     if len(x.shape) == 1:
         ii, jj, kk = xe.indices(3)
         feat = self.P(x)
         temp = xe.Tensor([1])
         comp = xe.Tensor()
         temp[0] = 1
         for iter_1 in range(self.r):
             comp = V.get_component(iter_1)
             temp(kk) << temp(ii) * comp(
                 ii, jj, kk) * xe.Tensor.from_buffer(feat[iter_1])(jj)
         return temp[0] + add_fun_const * self.add_fun(t, x)
     else:
         feat = self.P_batch(x)
         temp = np.ones(shape=(1, x.shape[1]))
         for iter_1 in range(self.r):
             comp = V.get_component(iter_1).to_ndarray()
             temp = np.einsum('il,ijk,jl->kl', temp, comp, feat[iter_1])
         return temp[0] + add_fun_const * self.add_fun(t, x)
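The batch branch above is a plain left-to-right tensor-train contraction: a (1, nos) boundary of ones is contracted with one TT core and one feature matrix per step. A minimal self-contained NumPy sketch of the same recursion, with hypothetical shapes and random data:

import numpy as np

# cores[k] has shape (r_k, p, r_{k+1}); feat[k] has shape (p, nos)
rng = np.random.default_rng(0)
d, p, nos = 4, 3, 5
ranks = [1, 2, 2, 2, 1]
cores = [rng.standard_normal((ranks[k], p, ranks[k + 1])) for k in range(d)]
feat = [rng.standard_normal((p, nos)) for _ in range(d)]

temp = np.ones((1, nos))  # boundary with left rank r_0 = 1
for k in range(d):
    # (r_k, nos), (r_k, p, r_{k+1}), (p, nos) -> (r_{k+1}, nos)
    temp = np.einsum('il,ijk,jl->kl', temp, cores[k], feat[k])
values = temp[0]  # one scalar per sample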
    def adapt_ranks(self, U, S, Vt, smin):
        """ Add a new rank to S
        Parameters
        ----------
        U: xe.Tensor
            left part of SVD
        S: xe.Tensor
            middle part of SVD, diagonal matrix
        Vt: xe.Tensor
            right part of SVD
        smin: float
            Threshold for the smallest singular values
    
        Returns
        -------
        Unew: xe.Tensor
            left part of SVD with one rank increased
        Snew: xe.Tensor
            middle part of SVD, diagonal matrix with one rank increased
        Vtnew: xe.Tensor
            right part of SVD with one rank increased
        """
        i1, i2, i3, i4, i5, i6, j1, j2, j3, j4, k1, k2, k3 = xe.indices(13)
        res = xe.Tensor()
        # S: enlarge by one row/column and seed the new singular value
        Snew = xe.Tensor([S.dimensions[0] + 1, S.dimensions[1] + 1])
        Snew.offset_add(S, [0, 0])
        Snew[S.dimensions[0], S.dimensions[1]] = 0.01 * smin

        # U: append a direction orthogonal to the existing columns
        # (project out twice for numerical stability)
        onesU = xe.Tensor.ones([U.dimensions[0], U.dimensions[1]])
        Unew = xe.Tensor(
            [U.dimensions[0], U.dimensions[1], U.dimensions[2] + 1])
        Unew.offset_add(U, [0, 0, 0])
        res(i1, i2) << U(i1, i2, k1) * U(j1, j2, k1) * onesU(j1, j2)
        onesU = onesU - res
        res(i1, i2) << U(i1, i2, k1) * U(j1, j2, k1) * onesU(j1, j2)
        onesU = onesU - res
        onesU.reinterpret_dimensions([U.dimensions[0], U.dimensions[1], 1])
        if xe.frob_norm(onesU) != 0:
            onesU = onesU / xe.frob_norm(onesU)
            Unew.offset_add(onesU, [0, 0, U.dimensions[2]])

        # Vt: append an orthogonal row analogously
        onesVt = xe.Tensor.ones([Vt.dimensions[1], Vt.dimensions[2]])
        Vtnew = xe.Tensor(
            [Vt.dimensions[0] + 1, Vt.dimensions[1], Vt.dimensions[2]])
        Vtnew.offset_add(Vt, [0, 0, 0])
        res(i1, i2) << Vt(k1, i1, i2) * Vt(k1, j1, j2) * onesVt(j1, j2)
        onesVt = onesVt - res
        res(i1, i2) << Vt(k1, i1, i2) * Vt(k1, j1, j2) * onesVt(j1, j2)
        onesVt = onesVt - res
        onesVt.reinterpret_dimensions([1, Vt.dimensions[1], Vt.dimensions[2]])
        if xe.frob_norm(onesVt) != 0:
            onesVt = onesVt / xe.frob_norm(onesVt)
            Vtnew.offset_add(onesVt, [Vt.dimensions[0], 0, 0])

        return Unew, Snew, Vtnew
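The same rank-increase step can be sketched in plain NumPy for a matrix-shaped SVD: pad S with the new singular value 0.01 * smin and append to U (and Vt) a direction orthogonal to the existing columns, projecting twice just as the two res-subtractions above do. A sketch under these assumptions, not the xerus implementation:

import numpy as np

def adapt_ranks_np(U, S, Vt, smin):
    m, r = U.shape
    Snew = np.zeros((r + 1, r + 1))
    Snew[:r, :r] = S
    Snew[r, r] = 0.01 * smin  # seed the new singular value

    u = np.ones(m)
    for _ in range(2):        # two Gram-Schmidt passes, as in the xerus code
        u -= U @ (U.T @ u)
    if np.linalg.norm(u) != 0:
        u /= np.linalg.norm(u)
    Unew = np.column_stack([U, u])

    v = np.ones(Vt.shape[1])
    for _ in range(2):
        v -= Vt.T @ (Vt @ v)
    if np.linalg.norm(v) != 0:
        v /= np.linalg.norm(v)
    Vtnew = np.vstack([Vt, v])
    return Unew, Snew, Vtnew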
Example #3
def costs_component_gradient_fd(_tt, _mode, _measures, _values, _h=1e-8):
    val0 = costs(_tt, _measures, _values)[0]
    test = xe.TTTensor(_tt)
    ret = xe.Tensor(_tt.get_component(_mode).dimensions)
    for I in range(ret.size):
        testCore = xe.Tensor(_tt.get_component(_mode))
        testCore[I] += _h
        test.set_component(_mode, testCore)
        valI = costs(test, _measures, _values)[0]
        ret[I] = (valI - val0) / _h
    return ret.to_ndarray()
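A quick way to validate such a finite-difference routine is against a cost whose gradient is known in closed form. The sketch below mirrors the forward-difference loop above on a hypothetical quadratic cost:

import numpy as np

def fd_gradient(f, x, h=1e-8):
    # perturb one entry at a time, as in the component loop above
    g = np.empty_like(x)
    f0 = f(x)
    for i in range(x.size):
        xp = x.copy()
        xp.flat[i] += h
        g.flat[i] = (f(xp) - f0) / h
    return g

x = np.random.default_rng(1).standard_normal(6)
f = lambda v: 0.5 * np.dot(v, v)  # gradient is v itself
assert np.allclose(fd_gradient(f, x), x, atol=1e-6)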
 def contract_feat(self, T, feat_list):
     # contract the TT tensor T against one feature vector per core
     ii, jj, kk = xe.indices(3)
     comp = xe.Tensor()
     temp = xe.Tensor([1])
     temp[0] = 1
     r = T.order()
     for iter_1 in range(r):
         comp = T.get_component(iter_1)
         temp(kk) << temp(ii) * comp(ii, jj, kk) * xe.Tensor.from_ndarray(
             feat_list[iter_1])(jj)
     return temp[0]
Example #5
	def push_right_stack(self, pos) :
		i1,i2,i3, j1,j2,j3, k1,k2 = xe.indices(8)
		Ai = self.A.get_component(pos)
		xi = self.x.get_component(pos)
		bi = self.b.get_component(pos)
		
		tmpA = xe.Tensor()
		tmpB = xe.Tensor()
		tmpA(j1, j2, j3) << xi(j1, k1, i1)*Ai(j2, k1, k2, i2)*xi(j3, k2, i3) \
				* self.rightAStack[-1](i1, i2, i3)
		self.rightAStack.append(tmpA)
		tmpB(j1, j2) << xi(j1, k1, i1)*bi(j2, k1, i2) \
				* self.rightBStack[-1](i1, i2)
		self.rightBStack.append(tmpB)
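In NumPy notation the operator-stack update contracts the solution core twice against the operator core and the previous stack entry. A sketch with hypothetical ranks:

import numpy as np

rng = np.random.default_rng(2)
r1, r2, R1, R2, n = 2, 3, 2, 2, 4
xi = rng.standard_normal((r1, n, r2))        # solution core
Ai = rng.standard_normal((R1, n, n, R2))     # operator core
rightA = rng.standard_normal((r2, R2, r2))   # previous right stack entry

# tmpA(a, b, c) = sum over k, l, i, j, m of xi[a,k,i] Ai[b,k,l,j] xi[c,l,m] rightA[i,j,m]
tmpA = np.einsum('aki,bklj,clm,ijm->abc', xi, Ai, xi, rightA)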
Example #6
def random_data_selection(Alist, C1ex, C2list, noo, nos):
    """Creates label data for the given solution C1ex
    Parameters
    ----------
    Alist: list of xerus tensors
        Dictionary in CP format
    C1ex: xerus TTOperator
        Solution for which samples shall be constructed
    C2list: list of xerus tensors
        Selection operator in CP format
    noo: int
        number of dimensions
    nos: int
        number of samples
      
    Returns
    -------
    Y: numpy array
        noo times nos matrix containing the labels for the given solution
    """
    i1, i2, i3, i4, i5, i6, j1, j2, j3, j4, k1, k2, k3, k4 = xerus.indices(14)
    l = np.ones([nos, 1, noo])
    r = np.ones([nos, 1, noo])
    tmp1 = xerus.Tensor()
    for i in range(noo):
        t1 = C1ex.get_component(i)
        t2 = C2list[i]
        t3 = Alist[i]
        tmp1(i1, i2, i3, i4) << t3(k1, i2) * t1(i1, k1, k2, i4) * t2(k2, i3)
        tmp1np = tmp1.to_ndarray()
        l = np.einsum('mid,imdj->mjd', l, tmp1np)
    return np.einsum('mid,mid->dm', l, r)
Example #7
def build_data_tensor_list2(noo, x, nos, psi, p):
    """Creates the dictionary for given basis functions and samples
    Parameters
    ----------
    noo: int
        number of dimensions
    x: numpy array
        noo x nos array containing the sample data
    nos: int
        number of samples
    psi: list of functions
        basis functions
    p: int
        number of basis functions
    Returns
    -------
    AList: list of xerus tensors
        Dictionary operator in CP format
    """
    AList = []
    for i in range(noo):
        tmp = xerus.Tensor([p, nos])
        for k in range(nos):
            for l in range(p):
                tmp[l, k] = psi[l](x[i, k])
        AList.append(tmp)
    return AList
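For example, with monomial basis functions the dictionary entries are simply powers of the samples. Note the default-argument trick that freezes the loop variable in each lambda (assuming xerus is imported as in the snippets above):

import numpy as np

noo, nos, p = 3, 10, 4
x = np.random.default_rng(3).standard_normal((noo, nos))
psi = [lambda y, d=d: y**d for d in range(p)]  # monomials 1, y, y^2, y^3
AList = build_data_tensor_list2(noo, x, nos, psi, p)
# AList[i][l, k] == x[i, k]**l for dimension i, basis index l, sample k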
Example #8
	def solve(self) :
		# build right stack
		self.x.move_core(0, True)
		for pos in reversed(range(1, self.d)):
			self.push_right_stack(pos)
		
		i1,i2,i3, j1,j2,j3, k1,k2 = xe.indices(8)
		residuals = [1000]*10
		
		for itr in range(self.maxIterations):
			residuals.append(self.calc_residual_norm())
			if residuals[-1]/residuals[-10] > 0.99 :
				print("Done! Residual decreased from:", residuals[10], "to", residuals[-1], "in", len(residuals)-10, "sweeps")
				return
			
			print("Iteration:",itr, "Residual:", residuals[-1])
			
			# sweep left -> right
			for pos in range(self.d):
				op = xe.Tensor()
				rhs = xe.Tensor()
				
				Ai = self.A.get_component(pos)
				bi = self.b.get_component(pos)
				
				op(i1, i2, i3, j1, j2, j3) << self.leftAStack[-1](i1, k1, j1)*Ai(k1, i2, j2, k2)*self.rightAStack[-1](i3, k2, j3)
				rhs(i1, i2, i3) <<            self.leftBStack[-1](i1, k1) *   bi(k1, i2, k2) *   self.rightBStack[-1](i3, k2)
				
				tmp = xe.Tensor()
				tmp(i1&0) << rhs(j1&0) / op(j1/2, i1/2)
				self.x.set_component(pos, tmp)
				
				if pos+1 < self.d :
					self.x.move_core(pos+1, True)
					self.push_left_stack(pos)
					self.rightAStack.pop()
					self.rightBStack.pop()
			
			
			# right -> left, only move core and update stack
			self.x.move_core(0, True)
			for pos in reversed(range(1, self.d)):
				self.push_right_stack(pos)
				self.leftAStack.pop()
				self.leftBStack.pop()
Example #9
def build_choice_tensor2(noo):
    """Creates the Selection Tensor for the Fermi Pasta activation pattern
    Parameters
    ----------
    noo: int
        number of dimensions
    Returns
    -------
    C2list: list of xerus tensors
        Selection tensor in CP format for the Fermi Pasta activation pattern
    """
    C2list = []
    tmp = xerus.Tensor([3, noo])
    for j in range(noo):
        if j == 0:
            tmp[2, j] = 1
        elif j == 1:
            tmp[1, j] = 1
        else:
            tmp[0, j] = 1
    C2list.append(tmp)
    for i in range(1, noo - 1):
        tmp = xerus.Tensor([4, noo])
        for j in range(noo):
            if j == i - 1:
                tmp[3, j] = 1
            elif j == i:
                tmp[2, j] = 1
            elif j == i + 1:
                tmp[1, j] = 1
            else:
                tmp[0, j] = 1
        C2list.append(tmp)

    tmp = xerus.Tensor([3, noo])
    for j in range(noo):
        if j == noo - 2:
            tmp[2, j] = 1
        elif j == noo - 1:
            tmp[1, j] = 1
        else:
            tmp[0, j] = 1
    C2list.append(tmp)

    return C2list
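Each CP factor above is a banded 0/1 matrix marking which selection slot a site occupies in every equation. A NumPy sketch (hypothetical helper) of one interior factor makes the pattern visible:

import numpy as np

def interior_selection_matrix(i, noo):
    # row 0 marks equations that do not involve site i; row 3 is set when
    # site i sits to the right of equation j (j == i - 1), row 2 when it is
    # the site itself (j == i), row 1 when it sits to the left (j == i + 1)
    tmp = np.zeros((4, noo))
    for j in range(noo):
        if j == i - 1:
            tmp[3, j] = 1
        elif j == i:
            tmp[2, j] = 1
        elif j == i + 1:
            tmp[1, j] = 1
        else:
            tmp[0, j] = 1
    return tmp

print(interior_selection_matrix(2, 5))  # site 2 appears in equations 1, 2, 3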
Example #10
def create_S():
    S = xe.Tensor([MAX_NUM_PER_SITE, MAX_NUM_PER_SITE])

    # set diagonal
    for i in range(MAX_NUM_PER_SITE):
        S[[i, i]] = -i

    # set off-diagonal
    for i in range(MAX_NUM_PER_SITE - 1):
        S[[i, i + 1]] = i + 1

    return 0.07 * S
 def contract_feat_batch(self, T, feat_list):
     # batched contract_feat: each feature matrix holds one column per sample
     ii, jj, kk, ll = xe.indices(4)
     comp = xe.Tensor()
     temp = np.ones([1, feat_list[0].shape[1]])
     r = T.order()
     for iter_1 in range(r):
         comp = T.get_component(iter_1).to_ndarray()
         # temp(kk, ll) << temp(ii, ll)*comp(ii, jj, kk)*xe.Tensor.from_ndarray(feat_list[iter_1])(jj,ll )
         temp = np.einsum('il,ijk,jl->kl', temp, comp, feat_list[iter_1])
     return temp[0, :]
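The batch recursion is meant to agree column-by-column with the single-sample contraction; a self-contained NumPy check of that equivalence:

import numpy as np

rng = np.random.default_rng(4)
d, p, nos = 3, 2, 4
ranks = [1, 2, 2, 1]
cores = [rng.standard_normal((ranks[k], p, ranks[k + 1])) for k in range(d)]
feats = [rng.standard_normal((p, nos)) for _ in range(d)]

# batched recursion, as in contract_feat_batch
temp = np.ones((1, nos))
for k in range(d):
    temp = np.einsum('il,ijk,jl->kl', temp, cores[k], feats[k])
batch = temp[0]

# per-sample recursion, as in contract_feat
single = np.empty(nos)
for s in range(nos):
    t = np.ones(1)
    for k in range(d):
        t = np.einsum('i,ijk,j->k', t, cores[k], feats[k][:, s])
    single[s] = t[0]

assert np.allclose(batch, single)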
Example #12
def create_operator(degree):
    i, j, k, l = xe.indices(4)

    # create matrices
    M = create_M()
    S = create_S()
    L = create_L()
    Sstar = 0.7 * M + S
    I = xe.Tensor.identity([MAX_NUM_PER_SITE, MAX_NUM_PER_SITE])

    # create empty TTOperator
    A = xe.TTOperator(2 * degree)

    # create first component
    comp = xe.Tensor()
    comp(i, j, k, l) << \
     Sstar(j, k) * xe.Tensor.dirac([1, 3], 0)(i, l) \
     +   L(j, k) * xe.Tensor.dirac([1, 3], 1)(i, l) \
     +   I(j, k) * xe.Tensor.dirac([1, 3], 2)(i, l)

    A.set_component(0, comp)

    # create middle components
    comp(i, j, k, l) << \
       I(j, k) * xe.Tensor.dirac([3, 3], [0, 0])(i, l) \
     + M(j, k) * xe.Tensor.dirac([3, 3], [1, 0])(i, l) \
     + S(j, k) * xe.Tensor.dirac([3, 3], [2, 0])(i, l) \
     + L(j, k) * xe.Tensor.dirac([3, 3], [2, 1])(i, l) \
     + I(j, k) * xe.Tensor.dirac([3, 3], [2, 2])(i, l)

    for c in range(1, degree - 1):
        A.set_component(c, comp)

    # create last component
    comp(i, j, k, l) << \
       I(j, k) * xe.Tensor.dirac([3, 1], 0)(i, l) \
     + M(j, k) * xe.Tensor.dirac([3, 1], 1)(i, l) \
     + S(j, k) * xe.Tensor.dirac([3, 1], 2)(i, l)

    A.set_component(degree - 1, comp)

    return A
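The dirac tensors place each local matrix into one block of a 3 x 3 transfer structure, so the middle component is equivalent to the block matrix [[I, 0, 0], [M, 0, 0], [S, L, I]] reshaped to (3, n, n, 3). A NumPy sketch under that reading (I, M, S, L assumed square n x n):

import numpy as np

def middle_core(I, M, S, L):
    # blocks at (0,0)=I, (1,0)=M, (2,0)=S, (2,1)=L, (2,2)=I,
    # matching the dirac positions in the component above
    n = I.shape[0]
    comp = np.zeros((3, n, n, 3))
    comp[0, :, :, 0] = I
    comp[1, :, :, 0] = M
    comp[2, :, :, 0] = S
    comp[2, :, :, 1] = L
    comp[2, :, :, 2] = I
    return comp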
    def update_components_np(self, G, w, mat_list, rew_MC, n_sweep,
                             P_constraints_vec, smin, omega, kminor, adapt,
                             maxranks, add_fun_list, current_fun_c):
        noo = G.order()
        Smu_left, Gamma, Smu_right, Theta, U_left, U_right, Vt_left, Vt_right = (
            xe.Tensor() for i in range(8))
        p = mat_list[0].shape[0]
        i1, i2, i3, i4, i5, i6, j1, j2, j3, j4, k1, k2, k3 = xe.indices(13)
        constraints_constant = 0
        num_constraints = P_constraints_vec[0].shape[1]
        d = G.order()
        # building Stacks for operators
        lStack_x = [np.ones(shape=[1, rew_MC.size])]
        rStack_x = [np.ones(shape=[1, rew_MC.size])]
        G0_lStack = [np.ones(shape=(1, num_constraints))]
        G0_rStack = [np.ones(shape=(1, num_constraints))]
        if G.order() > 1:
            G.move_core(1)
            G.move_core(0)
        for i0 in range(d - 1, 0, -1):
            G_tmp = G.get_component(i0).to_ndarray()
            A_tmp_x = mat_list[i0]
            rStack_xnp = rStack_x[-1]
            G_tmp_np_x = np.tensordot(G_tmp, A_tmp_x, axes=((1), (0)))
            rStack_xnpres = np.einsum('jkm,km->jm', G_tmp_np_x, rStack_xnp)
            rStack_x.append(rStack_xnpres)

            rStack_G0_tmp = G0_rStack[-1]
            G0_tmp_np = np.tensordot(G_tmp,
                                     P_constraints_vec[i0],
                                     axes=((1), (0)))
            G0_tmp = np.einsum('jkm,km->jm', G0_tmp_np, rStack_G0_tmp)
            # G0_tmp = np.einsum('ijk,jl,kl->il',G_tmp, P_constraints_vec[i0], rStack_G0_tmp)
            G0_rStack.append(G0_tmp)
        #loop over each component from left to right
        for i0 in range(0, d):
            # get singular values and orthogonalize wrt the next core mu
            if i0 > 0:
                # get left and middle component
                Gmu_left = G.get_component(i0 - 1)
                Gmu_middle = G.get_component(i0)
                (U_left(i1, i2, k1), Smu_left(k1, k2), Vt_left(
                    k2, i3)) << xe.SVD(Gmu_left(i1, i2, i3))
                Gmu_middle(i1, i2,
                           i3) << Vt_left(i1, k2) * Gmu_middle(k2, i2, i3)
                if G.ranks()[i0-1] < maxranks[i0-1] and adapt \
                    and Smu_left[int(np.max([Smu_left.dimensions[0] - kminor,0])),int(np.max([int(Smu_left.dimensions[1] - kminor),0]))] > smin:
                    U_left, Smu_left, Gmu_middle = self.adapt_ranks(
                        U_left, Smu_left, Gmu_middle, smin)
                sing = [Smu_left[i, i] for i in range(Smu_left.dimensions[0])]
                # print('left', 'smin', smin, 'sing', sing)

                Gmu_middle(i1, i2,
                           i3) << Smu_left(i1, k1) * Gmu_middle(k1, i2, i3)
                G.set_component(i0 - 1, U_left)
                G.set_component(i0, Gmu_middle)
                Gamma = np.zeros(Smu_left.dimensions
                                 )  # build cut-off sing value matrix Gamma
                for j in range(Smu_left.dimensions[0]):
                    Gamma[j, j] = 1 / np.max([smin, Smu_left[j, j]])
                # print('Gamma', Gamma)
            if i0 < d - 1:
                # get middle and rightcomponent
                Gmu_middle = G.get_component(i0)
                Gmu_right = G.get_component(i0 + 1)
                (U_right(i1, i2, k1), Smu_right(k1, k2), Vt_right(
                    k2, i3)) << xe.SVD(Gmu_middle(i1, i2, i3))

                sing = [
                    Smu_right[i, i] for i in range(Smu_right.dimensions[0])
                ]
                # print('right', 'smin', smin, 'sing', sing)
                Gmu_right(i1, i2,
                          i3) << Vt_right(i1, k1) * Gmu_right(k1, i2, i3)
                #if mu == d-2 and G.ranks()[mu] < maxranks[mu] and adapt and Smu_right[Smu_right.dimensions[0] - kminor,Smu_right.dimensions[1] - kminor] > smin:
                #    U_right, Smu_right, Gmu_right  = adapt_ranks(U_right, Smu_right, Gmu_right,smin)
                Gmu_middle(i1, i2,
                           i3) << U_right(i1, i2, k1) * Smu_right(k1, i3)
                # G.set_component(i0, Gmu_middle)
                # G.set_component(i0+1, Gmu_right)
                Theta = np.zeros([G.ranks()[i0], G.ranks()[i0]
                                  ])  # build cut-off sing value matrix Theta
                # Theta = np.zeros([Gmu_middle.dimensions[2],Gmu_middle.dimensions[2]]) # build cut-off sing value matrix Theta
                for j in range(Theta.shape[0]):
                    if j >= Smu_right.dimensions[0]:
                        singval = 0
                    else:
                        singval = Smu_right[j, j]
                    Theta[j, j] = 1 / np.max([smin, singval])
            # update Stacks
            if i0 > 0:
                G_tmp = G.get_component(i0 - 1).to_ndarray()
                A_tmp_x = mat_list[i0 - 1]
                #            G_tmp_np = np.einsum('ijk,jl->ikl', G_tmp, A_tmp_x)
                G_tmp_np_x = np.tensordot(G_tmp, A_tmp_x, axes=((1), (0)))
                lStack_xnp = lStack_x[-1]
                lStack_xnpres = np.einsum('jm,jkm->km', lStack_xnp, G_tmp_np_x)
                lStack_x.append(lStack_xnpres)
                del rStack_x[-1]
                G0_lStack_tmp = G0_lStack[-1]
                G0_tmp_np = np.tensordot(G_tmp,
                                         P_constraints_vec[i0 - 1],
                                         axes=((1), (0)))
                G0_tmp = np.einsum('jm,jkm->km', G0_lStack_tmp, G0_tmp_np)
                # G0_tmp = np.einsum('il,ijk,jl->kl',G0_lStack_tmp, G_tmp, P_constraints_vec[i0-1])
                G0_lStack.append(G0_tmp)
                del G0_rStack[-1]

            Ai_x = mat_list[i0]
            lStack_xnp = lStack_x[-1]
            rStack_xnp = rStack_x[-1]
            op_pre = np.einsum('il,jl,kl->ijkl', lStack_xnp, Ai_x, rStack_xnp)
            #        op = np.einsum('ijkl,mnol->ijkmno', op_pre, op_pre)
            op_G0 = np.einsum('il,jl,kl->ijkl', G0_lStack[-1],
                              P_constraints_vec[i0], G0_rStack[-1])
            op = np.zeros(op_pre.shape[:-1] + op_pre.shape[:-1])
            op_dim = op.shape
            Gi = G.get_component(i0)

            id_reg_p = np.eye(p)
            if i0 > 0:
                id_reg_r = np.eye(Gi.dimensions[2])
                # op_reg(i1,i2,i3,j1,j2,j3) << Gamma(i1,k1) * Gamma(k1,j1) * id_reg_r(i3,j3)  * id_reg_p(i2,j2)
                op_reg = np.einsum('ij,jk,lm,no->inlkom', Gamma, Gamma,
                                   id_reg_r, id_reg_p)
                # print('op_reg', op_reg)
                op += w * w * op_reg
            # input()
            if i0 < d - 1:
                id_reg_l = np.eye(Gi.dimensions[0])
                # op_reg(i1,i2,i3,j1,j2,j3) << Theta(i3,k1) * Theta(k1,j3) * id_reg_l(i1,j1)  * id_reg_p(i2,j2)
                op_reg = np.einsum('ij,jk,lm,no->lnimok', Theta, Theta,
                                   id_reg_l, id_reg_p)
                op += w * w * op_reg

            op = op.reshape((op_dim[0] * op_dim[1] * op_dim[2],
                             op_dim[3] * op_dim[4] * op_dim[5]))

            op = np.vstack([op, np.zeros(op.shape[0])[None, :]])
            op = np.hstack([op, np.zeros(op.shape[0])[:, None]])
            rhs_dim = op_pre.shape[:-1]
            op_pre = op_pre.reshape(op_dim[0] * op_dim[1] * op_dim[2],
                                    op_pre.shape[-1])
            op_pre = np.concatenate([op_pre, add_fun_list[None, :]], axis=0)
            op += np.tensordot(op_pre, op_pre, axes=((1), (1)))
            # op += 2*rew_MC.size*constraints_constant*np.tensordot(op_G0, op_G0, axes=((3),(3)))
            #        rhs = np.einsum('ijkl,l->ijk', op_pre, rew_MC)
            rhs = np.tensordot(op_pre, rew_MC, axes=((1), (0)))

            if (n_sweep == 1 and i0 == 0):
                comp = G.get_component(i0).to_ndarray()
                Ax = np.tensordot(op_pre[:-1, :].reshape(comp.shape +
                                                         (rew_MC.size, )),
                                  comp,
                                  axes=([0, 1, 2], [0, 1, 2]))
                curr_const = np.einsum('il,jl,kl,ijk ->l', G0_lStack[-1],
                                       P_constraints_vec[i0], G0_rStack[-1],
                                       comp)
                w = min(
                    np.linalg.norm(Ax + current_fun_c * add_fun_list - rew_MC)
                    **2 / rew_MC.size +
                    constraints_constant * np.linalg.norm(curr_const)**2,
                    10000)
                # print('first_res', w, np.linalg.norm(Ax - rew_MC)**2/rew_MC.size, constraints_constant*np.linalg.norm(curr_const)**2)
            op += 1e-4 * np.eye(op.shape[0])
            rhs_reshape = rhs
            # rhs_reshape = rhs.reshape((rhs_dim[0] * rhs_dim[1] * rhs_dim[2]))
            sol_arr = np.linalg.solve(op, rhs_reshape)
            current_fun_c = sol_arr[-1]
            sol_arr = sol_arr[:-1]
            sol_arr_reshape = sol_arr.reshape(
                (rhs_dim[0], rhs_dim[1], rhs_dim[2]))
            sol = xe.Tensor.from_buffer(sol_arr_reshape)
            G.set_component(i0, sol)

        # calculate residuum

    #    Ax = np.einsum('jkli,jkl->i', op_pre, sol_arr_reshape)
    # print(i0)
        comp = G.get_component(d - 1).to_ndarray()
        Ax = np.tensordot(op_pre[:-1, :].reshape(comp.shape + (rew_MC.size, )),
                          comp,
                          axes=([0, 1, 2], [0, 1, 2]))
        curr_const = np.einsum('il,jl,kl,ijk ->l', G0_lStack[-1],
                               P_constraints_vec[d - 1], G0_rStack[-1],
                               sol_arr_reshape)
        # print(curr_const)
        error1 = np.linalg.norm(Ax + current_fun_c * add_fun_list -
                                rew_MC)**2 / rew_MC.size
        error2 = constraints_constant * np.linalg.norm(curr_const)**2
        # print('after', error1, error2)
        return w, error1 + error2, current_fun_c
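The Gamma and Theta factors built inside the sweep are diagonal cut-off matrices with entries 1 / max(smin, sigma_j), so small singular values cannot blow up the regularizer. A one-line NumPy sketch of that construction:

import numpy as np

def cutoff_matrix(sing_vals, smin):
    # diagonal of 1 / max(smin, sigma_j), as used for Gamma and Theta
    return np.diag(1.0 / np.maximum(smin, sing_vals))

print(cutoff_matrix(np.array([2.0, 0.5, 1e-6]), smin=1e-2))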
Example #14
def construct_exact_fermit_pasta(noo, p, beta):
    """Creates exact solution in selection format for the Fermi Pasta problems in the monomials basis,
    for other basis functions this needs to be transformed!
    Parameters
    ----------
    noo: int
        number of dimensions
    p: int
        number of basis functions
    beta: float
        coefficient of the Fermi Pasta equation
    Returns
    -------
    C1ex: xerus TTOperator
        Exact solution of the FPTU problem in the monomial basis
    """
    s = 3
    rank = 4
    dim = [p for i in range(0, noo)]
    dim.extend([s + 1 for i in range(0, noo)])
    dim[noo] = s
    dim[2 * noo - 1] = s
    C1ex = xerus.TTOperator(dim)

    comp = xerus.Tensor([rank, p, s + 1, rank])
    #s=0
    comp[0, 0, 0, 0] = 1
    #s=1
    comp[0, 0, 1, 0] = 1
    comp[0, 1, 1, 1] = 1
    comp[0, 2, 1, 2] = 1
    comp[0, 3, 1, 3] = 1
    #s=2
    comp[0, 1, 2, 0] = -2
    comp[0, 3, 2, 0] = -2 * beta
    comp[0, 2, 2, 1] = 3 * beta
    comp[0, 0, 2, 1] = 1
    comp[0, 1, 2, 2] = -3 * beta
    comp[0, 0, 2, 3] = beta

    comp[1, 2, 2, 0] = 3 * beta
    comp[1, 0, 2, 0] = 1
    comp[2, 1, 2, 0] = -3 * beta
    comp[3, 0, 2, 0] = beta
    #s=3
    comp[0, 0, 3, 0] = 1
    comp[1, 1, 3, 0] = 1
    comp[2, 2, 3, 0] = 1
    comp[3, 3, 3, 0] = 1

    comp0 = xerus.Tensor([1, p, s, rank])
    #s=0
    comp0[0, 0, 0, 0] = 1
    #s=1
    comp0[0, 0, 1, 0] = 1
    comp0[0, 1, 1, 1] = 1
    comp0[0, 2, 1, 2] = 1
    comp0[0, 3, 1, 3] = 1
    #s=2
    comp0[0, 1, 2, 0] = -2
    comp0[0, 3, 2, 0] = -2 * beta
    comp0[0, 2, 2, 1] = 3 * beta
    comp0[0, 0, 2, 1] = 1
    comp0[0, 1, 2, 2] = -3 * beta
    comp0[0, 0, 2, 3] = beta

    compd = xerus.Tensor([rank, p, s, 1])
    #s=0
    compd[0, 0, 0, 0] = 1
    #s=1
    compd[0, 1, 1, 0] = -2
    compd[0, 3, 1, 0] = -2 * beta
    compd[1, 2, 1, 0] = 3 * beta
    compd[1, 0, 1, 0] = 1
    compd[2, 1, 1, 0] = -3 * beta
    compd[3, 0, 1, 0] = beta
    #s=2
    compd[0, 0, 2, 0] = 1
    compd[1, 1, 2, 0] = 1
    compd[2, 2, 2, 0] = 1
    compd[3, 3, 2, 0] = 1

    C1ex.set_component(0, comp0)
    for i in range(1, noo - 1):
        C1ex.set_component(i, comp)
    C1ex.set_component(noo - 1, compd)
    return C1ex
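The core entries encode the FPUT right-hand side (x_{i+1} - 2 x_i + x_{i-1}) + beta * ((x_{i+1} - x_i)^3 - (x_i - x_{i-1})^3); expanding the cubes yields exactly the coefficients -2, -2*beta, 3*beta, -3*beta, beta placed in the components above. A direct NumPy evaluation of that formula (assuming zero boundary values, as the first and last components suggest) is handy for validating labels generated from C1ex:

import numpy as np

def fput_rhs(x, beta):
    # FPUT right-hand side with zero boundaries, one value per site
    xp = np.concatenate([x[1:], [0.0]])   # x_{i+1}
    xm = np.concatenate([[0.0], x[:-1]])  # x_{i-1}
    return (xp - 2 * x + xm) + beta * ((xp - x) ** 3 - (x - xm) ** 3)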
    def calc_grad(self, t, x):
        V = self.V[self.t_to_ind(t)]
        # print('t, self.t_to_ind(t)', t, self.t_to_ind(t),'frob_norm(v)', xe.frob_norm(V))
        if len(x.shape) == 1:
            c1, c2, c3 = xe.indices(3)
            feat = self.P(x)
            dfeat = self.dP(x)
            dV = np.zeros(shape=self.r)
            temp = xe.Tensor([1])
            comp = xe.Tensor()
            temp_right = xe.Tensor.ones([1])
            temp_left = xe.Tensor.ones([1])
            list_right = [None] * (self.r)
            list_right[self.r - 1] = xe.Tensor(temp_right)
            for iter_0 in range(self.r - 1, 0, -1):
                comp = V.get_component(iter_0)
                temp_right(c1) << temp_right(c3) * comp(
                    c1, c2, c3) * xe.Tensor.from_buffer(feat[iter_0])(c2)
                #            temp_right = xe.contract(comp, False, temp_right, False, 1)
                #            temp_right = xe.contract(temp_right, False, xe.Tensor.from_buffer(feat[iter_0]), False, 1)
                list_right[iter_0 - 1] = xe.Tensor(temp_right)
            for iter_0 in range(self.r):
                comp = V.get_component(iter_0)
                temp() << temp_left(c1) * comp(
                    c1, c2, c3) * xe.Tensor.from_buffer(
                        dfeat[iter_0])(c2) * list_right[iter_0](c3)
                #            temp = xe.contract(comp, False, list_right[iter_0], False, 1)
                #            temp = xe.contract(temp, False, xe.Tensor.from_buffer(dfeat[iter_0]), False, 1)
                #            temp = xe.contract(temp, False, temp_left, False, 1)
                temp_left(c3) << temp_left(c1) * comp(
                    c1, c2, c3) * xe.Tensor.from_buffer(feat[iter_0])(c2)
                #            temp_left = xe.contract(temp_left, False, comp, False, 1)
                #            temp_left = xe.contract(xe.Tensor.from_buffer(feat[iter_0]), False, temp_left, False, 1)

                dV[iter_0] = temp[0]
            return dV + self.grad_add_fun(t, x)
        else:
            nos = x.shape[1]
            feat = self.P_batch(x)
            dfeat = self.dP_batch(x)
            dV_mat = np.zeros(shape=x.shape)
            temp = np.zeros(1)
            temp_right = np.ones(shape=(1, nos))
            temp_left = np.ones(shape=(1, nos))
            list_right = [None] * (self.r)
            list_right[self.r - 1] = temp_right
            for iter_0 in range(self.r - 1, 0, -1):
                comp = V.get_component(iter_0).to_ndarray()
                #            temp_right(c1) << temp_right(c3) * comp(c1, c2, c3) * feat[iter_0](c2)
                list_right[iter_0 - 1] = np.einsum('kl,ijk,jl->il',
                                                   list_right[iter_0], comp,
                                                   feat[iter_0])
            for iter_0 in range(self.r):
                comp = V.get_component(iter_0).to_ndarray()
                #            temp() << temp_left(c1) * comp(c1, c2, c3) * dfeat[iter_0](c2) \
                #                * list_right[iter_0](c3)
                temp = np.einsum('il,ijk,jl,kl->l', temp_left, comp,
                                 dfeat[iter_0], list_right[iter_0])
                #            temp(c3) << temp_left(c1) * comp(c1, c2, c3) * feat[iter_0](c2)
                temp_left = np.einsum('il,ijk,jl->kl', temp_left, comp,
                                      feat[iter_0])
                dV_mat[iter_0, :] = temp

    #        _u = -gamma/lambd*np.dot(dV, B) - shift_TT
            return dV_mat + self.grad_add_fun(t, x)
Example #16
def construct_exact_fermit_pasta_random(noo, p, mean=False):
    """Creates exact solution in selection format for the Fermi Pasta problems 
    with random beta_i in the monomials basis with mean field,
    for other basis functions this needs to be transformed!
    Parameters
    ----------
    noo: int
        number of dimensions
    p: int
        number of basis functions
    mean: bool
        if mean field should be used
    Returns
    -------
    C1ex: xerus TTOperator
        Exact solution of the FPTU problem in the monomial basis
    m: np.array
        the mean field coefficients
    beta: np.array
        the beta coefficients
    """
    s = 3
    rank = 4
    dim = [p for i in range(0, noo)]
    dim.extend([s + 1 for i in range(0, noo)])
    dim[noo] = s
    dim[2 * noo - 1] = s
    C1ex = xerus.TTOperator(dim)

    beta = 2 * np.random.rand(noo) - 1
    if mean:
        m = 2 * np.random.rand(noo) - 1
    else:
        m = np.zeros(noo)

    for i in range(1, noo - 1):
        comp = xerus.Tensor([rank, p, s + 1, rank])
        #s=0
        comp[0, 0, 0, 0] = 1
        comp[0, 1, 0, 1] = m[i]
        comp[1, 0, 0, 1] = 1
        #s=1
        comp[0, 0, 1, 0] = 1
        comp[0, 1, 1, 1] = 1
        comp[0, 2, 1, 2] = 1
        comp[0, 3, 1, 3] = 1
        comp[1, 0, 1, 3] = 1 / beta[i + 1]

        #s=2
        comp[0, 1, 2, 0] = -2 + m[i]
        comp[0, 3, 2, 0] = -2 * beta[i]
        comp[0, 2, 2, 1] = 3 * beta[i]
        comp[0, 0, 2, 1] = 1 + m[i + 1]
        comp[0, 1, 2, 2] = -3 * beta[i]
        comp[0, 0, 2, 3] = beta[i]

        comp[1, 2, 2, 0] = 3 * beta[i]
        comp[1, 0, 2, 0] = 1 + m[i - 1]
        comp[2, 1, 2, 0] = -3 * beta[i]
        comp[3, 0, 2, 0] = beta[i]

        #s=3
        comp[3, 0, 3, 0] = 1 / beta[i - 1]
        comp[0, 0, 3, 1] = 1
        comp[1, 1, 3, 1] = 1
        comp[2, 2, 3, 1] = 1
        comp[3, 3, 3, 1] = 1

        C1ex.set_component(i, comp)

    comp0 = xerus.Tensor([1, p, s, rank])
    #s=0
    comp0[0, 0, 0, 0] = 1
    comp0[0, 1, 0, 1] = m[0]
    #s=1
    comp0[0, 0, 1, 0] = 1
    comp0[0, 1, 1, 1] = 1
    comp0[0, 2, 1, 2] = 1
    comp0[0, 3, 1, 3] = 1

    #s=2
    comp0[0, 1, 2, 0] = -2 + m[0]
    comp0[0, 3, 2, 0] = -2 * beta[0]
    comp0[0, 2, 2, 1] = 3 * beta[0]
    comp0[0, 0, 2, 1] = 1 + m[1]
    comp0[0, 1, 2, 2] = -3 * beta[0]
    comp0[0, 0, 2, 3] = beta[0]

    compd = xerus.Tensor([rank, p, s, 1])
    #s=0
    compd[0, 1, 0, 0] = m[noo - 1]
    compd[1, 0, 0, 0] = 1
    #s=1
    compd[0, 1, 1, 0] = -2 + m[noo - 1]
    compd[0, 3, 1, 0] = -2 * beta[noo - 1]
    compd[1, 2, 1, 0] = 3 * beta[noo - 1]
    compd[1, 0, 1, 0] = 1 + m[noo - 2]
    compd[2, 1, 1, 0] = -3 * beta[noo - 1]
    compd[3, 0, 1, 0] = beta[noo - 1]
    #s=2
    compd[0, 0, 2, 0] = 1
    compd[1, 1, 2, 0] = 1
    compd[2, 2, 2, 0] = 1
    compd[3, 3, 2, 0] = 1

    C1ex.set_component(0, comp0)

    C1ex.set_component(noo - 1, compd)
    return C1ex, m, beta
    def calc_laplace(self, t, x):
        if isinstance(t, (float, np.float64, int)):
            V = self.V[self.t_to_ind(t)]
            add_fun_const = self.c_add_fun_list[self.t_to_ind(t)]
        else:
            # t may also be a (V, add_fun_const) pair instead of a time point
            V, add_fun_const = t
        if len(x.shape) == 1:
            c1, c2, c3 = xe.indices(3)
            feat = self.P(x)
            dfeat = self.ddP(x)
            dV = 0
            temp = xe.Tensor([1])
            comp = xe.Tensor()
            temp_right = xe.Tensor.ones([1])
            temp_left = xe.Tensor.ones([1])
            list_right = [None] * (self.r)
            list_right[self.r - 1] = xe.Tensor(temp_right)
            for iter_0 in range(self.r - 1, 0, -1):
                comp = V.get_component(iter_0)
                temp_right(c1) << temp_right(c3) * comp(
                    c1, c2, c3) * xe.Tensor.from_buffer(feat[iter_0])(c2)
                #            temp_right = xe.contract(comp, False, temp_right, False, 1)
                #            temp_right = xe.contract(temp_right, False, xe.Tensor.from_buffer(feat[iter_0]), False, 1)
                list_right[iter_0 - 1] = xe.Tensor(temp_right)
            for iter_0 in range(self.r):
                comp = V.get_component(iter_0)
                temp() << temp_left(c1) * comp(
                    c1, c2, c3) * xe.Tensor.from_buffer(
                        dfeat[iter_0])(c2) * list_right[iter_0](c3)
                #            temp = xe.contract(comp, False, list_right[iter_0], False, 1)
                #            temp = xe.contract(temp, False, xe.Tensor.from_buffer(dfeat[iter_0]), False, 1)
                #            temp = xe.contract(temp, False, temp_left, False, 1)
                temp_left(c3) << temp_left(c1) * comp(
                    c1, c2, c3) * xe.Tensor.from_buffer(feat[iter_0])(c2)
                #            temp_left = xe.contract(temp_left, False, comp, False, 1)
                #            temp_left = xe.contract(xe.Tensor.from_buffer(feat[iter_0]), False, temp_left, False, 1)

                dV += temp[0]
            return dV + add_fun_const * self.laplacian_add_fun(t, x)
        else:
            nos = x.shape[1]
            feat = self.P_batch(x)
            dfeat = self.ddP_batch(x)
            dV_mat = np.zeros(shape=x.shape[-1])
            temp = np.zeros(1)
            temp_right = np.ones(shape=(1, nos))
            temp_left = np.ones(shape=(1, nos))
            list_right = [None] * (self.r)
            list_right[self.r - 1] = temp_right
            for iter_0 in range(self.r - 1, 0, -1):
                comp = V.get_component(iter_0).to_ndarray()
                #            temp_right(c1) << temp_right(c3) * comp(c1, c2, c3) * feat[iter_0](c2)
                list_right[iter_0 - 1] = np.einsum('kl,ijk,jl->il',
                                                   list_right[iter_0], comp,
                                                   feat[iter_0])
            for iter_0 in range(self.r):
                comp = V.get_component(iter_0).to_ndarray()
                #            temp() << temp_left(c1) * comp(c1, c2, c3) * dfeat[iter_0](c2) \
                #                * list_right[iter_0](c3)
                temp = np.einsum('il,ijk,jl,kl->l', temp_left, comp,
                                 dfeat[iter_0], list_right[iter_0])
                #            temp(c3) << temp_left(c1) * comp(c1, c2, c3) * feat[iter_0](c2)
                temp_left = np.einsum('il,ijk,jl->kl', temp_left, comp,
                                      feat[iter_0])
                dV_mat += temp

    #        _u = -gamma/lambd*np.dot(dV, B) - shift_TT
            return dV_mat + add_fun_const * self.laplacian_add_fun(t, x)
Example #18
ttA = xe.TTOperator(A)

# and verify its rank
print("ttA ranks:", ttA.ranks())

# the right hand side of the equation both as Tensor and in (Q)TT format
b = xe.Tensor.ones([2,]*9)
ttb = xe.TTTensor.ones(b.dimensions)

# construct a random initial guess of rank 3 for the ALS algorithm
ttx = xe.TTTensor.random([2,]*9, [3,]*8)

# and solve the system with the default ALS algorithm for symmetric positive operators
xe.ALS_SPD(ttA, ttx, ttb)

# to perform arithmetic operations we need to define some indices
i,j,k = xe.indices(3)

# calculate the residual of the just solved system to evaluate its accuracy
# here i^9 denotes a multi-index named i of dimension 9 (i.e. spanning 9 indices of the respective tensors)
residual = xe.frob_norm( ttA(i^9,j^9)*ttx(j^9) - ttb(i^9) )
print("residual:", residual)

# as a comparison solve the system exactly using the Tensor / operator
x = xe.Tensor()
x(j^9) << b(i^9) / A(i^9, j^9)

# and calculate the Frobenius norm of the difference
print("error:", xe.frob_norm(x - xe.Tensor(ttx)))

Example #19
def construct_exact_fermit_pasta_single_TT(noo, p, beta):
    """Creates exact solution in single TT format for the Fermi Pasta problems in the monomials basis,
    for other basis functions this needs to be transformed!
    Parameters
    ----------
    noo: int
        number of dimensions
    p: int
        number of basis functions
    beta: float
        coefficient of the Fermi Pasta equation
    Returns
    -------
    Solution: xerus TTTensor
        Exact solution of the FPTU problem in the monomial basis
    """
    dim = [p for i in range(0, noo)]
    dim.append(noo)
    Solution = xerus.TTTensor(dim)

    tmp = xerus.Tensor([1, 4, 4 * noo])
    for eq in range(noo):
        tmp[0, 0, 4 * eq] = 1
    tmp[0, 0, 0] = 0
    tmp[0, 1, 0] = -2
    tmp[0, 3, 0] = -2 * beta
    tmp[0, 0, 1] = 1
    tmp[0, 2, 1] = 3 * beta
    tmp[0, 1, 2] = -3 * beta
    tmp[0, 0, 3] = beta

    tmp[0, 0, 4] = 1
    tmp[0, 1, 5] = 1
    tmp[0, 2, 6] = 1
    tmp[0, 3, 7] = 1
    Solution.set_component(0, tmp)

    for comp in range(1, Solution.order() - 1):
        tmp = xerus.Tensor([4 * noo, 4, 4 * noo])
        for eq in range(noo):
            tmp[4 * eq, 0, 4 * eq] = 1
        if (comp + 1) * 4 < 4 * noo:
            tmp[4 * (comp + 1), 0, 4 * (comp + 1)] = 1
            tmp[4 * (comp + 1), 1, 4 * (comp + 1) + 1] = 1
            tmp[4 * (comp + 1), 2, 4 * (comp + 1) + 2] = 1
            tmp[4 * (comp + 1), 3, 4 * (comp + 1) + 3] = 1

        tmp[4 * comp, 0, 4 * comp] = 0
        tmp[4 * comp, 1, 4 * comp] = -2
        tmp[4 * comp, 3, 4 * comp] = -2 * beta
        tmp[4 * comp + 1, 0, 4 * comp] = 1
        tmp[4 * comp + 1, 2, 4 * comp] = 3 * beta
        tmp[4 * comp + 2, 1, 4 * comp] = -3 * beta
        tmp[4 * comp + 3, 0, 4 * comp] = beta
        tmp[4 * comp, 0, 4 * comp + 1] = 1
        tmp[4 * comp, 2, 4 * comp + 1] = 3 * beta
        tmp[4 * comp, 1, 4 * comp + 2] = -3 * beta
        tmp[4 * comp, 0, 4 * comp + 3] = beta

        tmp[4 * (comp - 1), 0, 4 * (comp - 1)] = 1
        tmp[4 * (comp - 1) + 1, 1, 4 * (comp - 1)] = 1
        tmp[4 * (comp - 1) + 2, 2, 4 * (comp - 1)] = 1
        tmp[4 * (comp - 1) + 3, 3, 4 * (comp - 1)] = 1

        Solution.set_component(comp, tmp)

    tmp = xerus.Tensor([4 * noo, noo, 1])
    for eq in range(noo):
        tmp[4 * eq, eq, 0] = 1
    Solution.set_component(Solution.order() - 1, tmp)
    Solution.round(0.0)
    return Solution
Example #20
def create_L():
    L = xe.Tensor([MAX_NUM_PER_SITE, MAX_NUM_PER_SITE])
    for i in range(MAX_NUM_PER_SITE):
        L[[i, i]] = i / (i + 5.0)
    return L
Example #21
        def runCostsTest():
            print(
                f"Number of samples:        {num_samples:>{len(str(num_samples))}d}"
            )
            print(
                f"Number of assets:         {num_assets:>{len(str(num_samples))}d}"
            )
            print(
                f"Number of exercise dates: {num_exercise_dates:>{len(str(num_samples))}d}"
            )
            print(
                f"Chaos degree:             {degree:>{len(str(num_samples))}d}"
            )
            print(
                f"Dimension:                {dimension:>{len(str(num_samples))}d}"
            )
            print(
                f"Ranks:                    {rank:>{len(str(num_samples))}d}")
            print(
                f"Spot:                     {spot:>{len(str(num_samples))}.1f}"
            )
            print(
                f"Strike:                   {strike:>{len(str(num_samples))}.1f}"
            )
            print()

            # Compute the samples for Monte-Carlo integration.
            increments, asset_values, values = compute_discounted_payoff_PutAmer(
                num_assets=num_assets,
                num_steps=num_exercise_dates - 1,
                num_samples=num_samples,
                spot=np.full(num_assets, spot),
                strike=strike,
                trend=np.zeros(num_assets),
                volatility=np.full(num_assets, 0.2),
                correlation=0.2)

            measures = hermite_measures(increments, degree)
            assert values.shape == (num_samples, num_exercise_dates)
            assert measures.shape == (num_exercise_dates - 1, num_samples,
                                      dimension)

            # Define a random initial value.
            tt = xe.TTTensor.random([dimension] * (num_exercise_dates - 1),
                                    [rank] * (num_exercise_dates - 2))
            arr = xe.Tensor(tt).to_ndarray()
            arr[(0, ) * (num_exercise_dates - 1)] = 0

            carr = costs_array(arr, measures, values)
            print(
                f"Costs[array]:     {carr[0]:.2e} \u00B1 {np.sqrt(carr[1]/num_samples):.2e}"
            )
            for aexp in range(5):
                ctt = costs(tt, measures, values, 10**aexp)
                print(
                    f"Costs[tt|\u03b1=1e{aexp:02d}]: {ctt[0]:.2e} \u00B1 {np.sqrt(ctt[1]/num_samples):.2e}"
                )
                print(
                    f"Errors:           {abs(carr[0]-ctt[0]):.2e} & {abs(np.sqrt(carr[1]/num_samples)-np.sqrt(ctt[1]/num_samples)):.2e}"
                )
            print()
Example #22
def run_als(noo, nos, C1, C2list, Alist, C1ex, Y, max_iter, lam):
    """Perform the regularized ALS simulation
    Parameters
    ----------
    noo: int
        number of dimensions
    nos: int
        number of samples
    C1: xerus TTTensor
        iterate tensor
    C2list: list of xerus tensor
        selection tensor
    Alist: list of xerus tensor
        dictionary tensor
    C1ex: xerus TTTensor
        exact solution
    Y: xerus Tensor
        right hand side
    max_iter: int
        number of iterations
    lam: float
        regularization parameter
    Returns
    -------
    errors: list of floats
        list of relative error after each sweep
    """
    i1, i2, i3, i4, i5, i6, j1, j2, j3, j4, k1, k2, k3, k4 = xerus.indices(14)
    diff = C1ex - C1
    tmp1 = xerus.Tensor()
    l = np.ones([noo, 1, 1, noo])
    r = np.ones([noo, 1, 1, noo])
    for i in range(noo):
        t1 = diff.get_component(i)
        t2 = C2list[i]
        tmp1(i1, i2, i3, j1, j2, j3) << t2(k1, i3) * t1(i1, k2, k1, j1) * t1(
            i2, k2, k3, j2) * t2(k3, j3)
        tmp1np = tmp1.to_ndarray()
        l = np.einsum('dije,ijdkle->dkle', l, tmp1np)
    lr = np.sqrt(np.einsum('ijkl,ijkl->', l, r)) / C1ex.frob_norm()
    errors = [lr]
    errors2 = [1]
    lams = [lam]
    # Initialize stacks
    rStack = [xerus.Tensor.ones([noo, 1, nos])]
    lStack = [xerus.Tensor.ones([noo, 1, nos])]
    for ind in range(noo - 1, 0, -1):
        C1_tmp = C1.get_component(ind)
        C2_tmp = C2list[ind]
        A_tmp = Alist[ind]
        C1_tmp(i1, i2, i3,
               i4) << C1_tmp(i1, k1, k2, i4) * A_tmp(k1, i2) * C2_tmp(k2, i3)
        rstacknp = rStack[-1].to_ndarray()
        C1_tmpnp = C1_tmp.to_ndarray()
        rstacknpres = np.einsum('imdj,djm->dim', C1_tmpnp, rstacknp)
        rStack.append(xerus.Tensor.from_ndarray(rstacknpres))

    forward = True
    mem = -1
    for it in range(0, max_iter):
        for pos in chain(range(0, noo), range(noo - 1, -1, -1)):
            if mem == pos:
                forward = not forward
            op = xerus.Tensor()
            rhs = xerus.Tensor()
            C2i = C2list[pos]
            Ai = Alist[pos]
            Ainp = Ai.to_ndarray()
            C2inp = C2i.to_ndarray()

            lStacknp = lStack[-1].to_ndarray()
            rStacknp = rStack[-1].to_ndarray()

            op_pre_np = np.einsum('dim,pm,sd,djm->ipsjmd', lStacknp, Ainp,
                                  C2inp, rStacknp)
            op_pre = xerus.Tensor.from_ndarray(op_pre_np)

            op(i1, i2, i3, i4, j1, j2, j3, j4) << op_pre(
                i1, i2, i3, i4, k1, k2) * op_pre(j1, j2, j3, j4, k1, k2)

            rhs(i1, i2, i3, i4) << op_pre(i1, i2, i3, i4, k1, k2) * Y(k2, k1)
            op += lam * xerus.Tensor.identity(op.dimensions)

            op_arr = op.to_ndarray()
            rhs_arr = rhs.to_ndarray()

            op_dim = op.dimensions
            op_arr_reshape = op_arr.reshape(
                (op_dim[0] * op_dim[1] * op_dim[2] * op_dim[3],
                 op_dim[4] * op_dim[5] * op_dim[6] * op_dim[7]))
            rhs_dim = rhs.dimensions
            rhs_arr_reshape = rhs_arr.reshape(
                (rhs_dim[0] * rhs_dim[1] * rhs_dim[2] * rhs_dim[3]))
            sol_arr = np.linalg.solve(op_arr_reshape, rhs_arr_reshape)

            sol_arr_reshape = sol_arr.reshape(
                (op.dimensions[0], op.dimensions[1], op.dimensions[2],
                 op.dimensions[3]))
            sol = xerus.Tensor.from_ndarray(sol_arr_reshape)
            C1.set_component(pos, sol)

            Ax = xerus.Tensor()
            Ax(i2, i1) << op_pre(j1, j2, j3, j4, i1, i2) * sol(j1, j2, j3, j4)
            error = (Ax - Y).frob_norm() / (Y.frob_norm())
            error2 = C1.frob_norm()

            if forward and pos < noo - 1:
                C1.move_core(pos + 1, True)
                C1_tmp = C1.get_component(pos)
                rStack = rStack[:-1]
                C1_tmp(
                    i1, i2, i3,
                    i4) << C1_tmp(i1, k1, k2, i4) * Ai(k1, i2) * C2i(k2, i3)
                lstacknp = lStack[-1].to_ndarray()
                C1_tmpnp = C1_tmp.to_ndarray()
                lstacknpres = np.einsum('dim,imdj->djm', lstacknp, C1_tmpnp)
                lStack.append(xerus.Tensor.from_ndarray(lstacknpres))
            if not forward and pos > 0:
                C1.move_core(pos - 1, True)
                C1_tmp = C1.get_component(pos)
                lStack = lStack[:-1]
                C1_tmp(
                    i1, i2, i3,
                    i4) << C1_tmp(i1, k1, k2, i4) * Ai(k1, i2) * C2i(k2, i3)
                rstacknp = rStack[-1].to_ndarray()
                C1_tmpnp = C1_tmp.to_ndarray()
                rstacknpres = np.einsum('imdj,djm->dim', C1_tmpnp, rstacknp)
                rStack.append(xerus.Tensor.from_ndarray(rstacknpres))

            mem = pos
        #end of iteration
        #lam = lam/10
        lam = np.max([np.min([0.1 * error / C1.frob_norm(), lam / 4]), 1e-14])
        diff = C1ex - C1
        tmp1 = xerus.Tensor()
        l = np.ones([noo, 1, 1, noo])
        r = np.ones([noo, 1, 1, noo])
        for i in range(noo):
            t1 = diff.get_component(i)
            t2 = C2list[i]
            tmp1(i1, i2, i3, j1, j2, j3) << t2(k1, i3) * t1(
                i1, k2, k1, j1) * t1(i2, k2, k3, j2) * t2(k3, j3)
            tmp1np = tmp1.to_ndarray()
            l = np.einsum('dije,ijdkle->dkle', l, tmp1np)
        lr = np.sqrt(np.einsum('ijkl,ijkl->', l, r)) / C1ex.frob_norm()
        print("Iteration " + str(it) + ' Error: ' + str(lr) + " Residual: " +
              str(error) + " Norm: " + str(C1.frob_norm()) + " Lambda: " +
              str(lam))

        errors.append(lr)
        errors2.append(error)
        lams.append(lam)

    return errors
Example #23
def update_components_salsa(G, noo, d, Alist, nos, Y, smin, w, kminor, adapt,
                            mR, maxranks):
    """Perform one SALSA sweep
    Parameters
    ----------
    G: xerus TTTensor
        iterate tensor
    noo: int
        number of dimensions
    d: int
        number of dimensions plus 1
    Alist: list of xerus tensor
        dictionary tensor   
    nos: int
        number of samples
    Y: xerus Tensor
        right hand side
    smin: float
        SALSA parameter smin
    w: float
        SALSA parameter omega
    kminor: int
        SALSA parameter number of additional ranks used in each simulation  
    adapt: bool
        whether rank adaptation should be used if necessary
    mR: int
        maximal rank allowed overall
    maxranks: list of int
        list of maximal ranks per position
    
    Returns
    -------
    error: float
        Residuum of iterate
    """

    p = Alist[0].dimensions[0]
    Smu_left, Gamma, Smu_right, Theta, U_left, U_right, Vt_left, Vt_right = (
        xerus.Tensor() for i in range(8))
    i1, i2, i3, i4, i5, i6, j1, j2, j3, j4, k1, k2, k3 = xerus.indices(13)
    tmp = xerus.Tensor()

    # building Stacks for operators
    lStack = [xerus.Tensor.ones([1, nos])]
    rStack = [xerus.Tensor.ones([1, nos])]

    G_tmp = G.get_component(noo)
    tmp(i1, i2, i3) << G_tmp(i1, i3, k1) * rStack[-1](k1, i2)
    rStack.append(tmp)
    for ind in range(d - 2, 0, -1):
        G_tmp = G.get_component(ind)
        A_tmp = Alist[ind]
        G_tmp(i1, i2, i3) << G_tmp(i1, k1, i3) * A_tmp(k1, i2)
        rstacknp = rStack[-1].to_ndarray()
        G_tmpnp = G_tmp.to_ndarray()
        rstacknpres = np.einsum('jmk,kms->jms', G_tmpnp, rstacknp)
        rStack.append(xerus.Tensor.from_ndarray(rstacknpres))

    #loop over each component from left to right
    for mu in range(0, d):
        # get singular values and orthogonalize wrt the next core mu
        if mu > 0:
            # get left and middle component
            Gmu_left = G.get_component(mu - 1)
            Gmu_middle = G.get_component(mu)
            (U_left(i1, i2, k1), Smu_left(k1, k2), Vt_left(
                k2, i3)) << xerus.SVD(Gmu_left(i1, i2, i3))
            Gmu_middle(i1, i2, i3) << Vt_left(i1, k2) * Gmu_middle(k2, i2, i3)
            #for j in range(kmin):
            if G.ranks()[mu-1] < np.min([maxranks[mu-1],mR]) and adapt \
                and Smu_left[int(np.max([Smu_left.dimensions[0] - kminor,0])),int(np.max([int(Smu_left.dimensions[1] - kminor),0]))] > smin:
                U_left, Smu_left, Gmu_middle = adapt_ranks(
                    U_left, Smu_left, Gmu_middle, smin)
            sing = [Smu_left[i, i] for i in range(Smu_left.dimensions[0])]

            Gmu_middle(i1, i2, i3) << Smu_left(i1, k1) * Gmu_middle(k1, i2, i3)
            G.set_component(mu - 1, U_left)
            G.set_component(mu, Gmu_middle)
            Gamma = xerus.Tensor(
                Smu_left.dimensions)  # build cut-off sing value matrix Gamma
            for j in range(Smu_left.dimensions[0]):
                Gamma[j, j] = 1 / np.max([smin, Smu_left[j, j]])
        if mu < d - 1:
            # get middle and rightcomponent
            Gmu_middle = G.get_component(mu)
            Gmu_right = G.get_component(mu + 1)
            (U_right(i1, i2, k1), Smu_right(k1, k2), Vt_right(
                k2, i3)) << xerus.SVD(Gmu_middle(i1, i2, i3))

            sing = [Smu_right[i, i] for i in range(Smu_right.dimensions[0])]
            Gmu_right(i1, i2, i3) << Vt_right(i1, k1) * Gmu_right(k1, i2, i3)
            #if mu == d-2 and G.ranks()[mu] < maxranks[mu] and adapt and Smu_right[Smu_right.dimensions[0] - kminor,Smu_right.dimensions[1] - kminor] > smin:
            #    U_right, Smu_right, Gmu_right  = adapt_ranks(U_right, Smu_right, Gmu_right,smin)
            Gmu_middle(i1, i2, i3) << U_right(i1, i2, k1) * Smu_right(k1, i3)
            G.set_component(mu, Gmu_middle)
            G.set_component(mu + 1, Gmu_right)
            Theta = xerus.Tensor([
                Gmu_middle.dimensions[2], Gmu_middle.dimensions[2]
            ])  # build cut-off sing value matrix Theta
            for j in range(Theta.dimensions[0]):
            if j >= Smu_right.dimensions[0]:
                singval = 0
            else:
                singval = Smu_right[j, j]
                Theta[j, j] = 1 / np.max([smin, singval])

        #update Stacks
        if mu > 0:
            G_tmp = G.get_component(mu - 1)
            A_tmp = Alist[mu - 1]
            G_tmp(i1, i2, i3) << G_tmp(i1, k1, i3) * A_tmp(k1, i2)
            lstacknp = lStack[-1].to_ndarray()
            G_tmpnp = G_tmp.to_ndarray()
            lstacknpres = np.einsum('jm,jmk->km', lstacknp, G_tmpnp)
            lStack.append(xerus.Tensor.from_ndarray(lstacknpres))
            rStack = rStack[:-1]

        op = xerus.Tensor()
        op_pre = xerus.Tensor()
        op_reg = xerus.Tensor()
        rhs = xerus.Tensor()
        Gi = G.get_component(mu)
        if mu != d - 1:
            Ai = Alist[mu]
            Ainp = Ai.to_ndarray()
            lStacknp = lStack[-1].to_ndarray()
            rStacknp = rStack[-1].to_ndarray()
            op_pre_np = np.einsum('im,jm,kms->ijkms', lStacknp, Ainp, rStacknp)
            op_pre = xerus.Tensor.from_ndarray(op_pre_np)
            op(i1, i2, i3, j1, j2,
               j3) << op_pre(i1, i2, i3, k1, k2) * op_pre(j1, j2, j3, k1, k2)
            rhs(i1, i2, i3) << op_pre(i1, i2, i3, k1, k2) * Y(k2, k1)
        else:
            tmp_id = xerus.Tensor.identity([noo, 1, noo, 1])
            tmp_ones = xerus.Tensor.ones([1])
            op(i1, i2, i3, j1, j2, j3) << lStack[-1](i1, k1) * lStack[-1](
                j1, k1) * tmp_id(i2, i3, j2, j3)
            rhs(i1, i2, i3) << lStack[-1](i1, k1) * Y(i2, k1) * tmp_ones(i3)

        if mu < d - 1:
            id_reg_p = xerus.Tensor.identity([p, p])
        else:
            id_reg_p = xerus.Tensor.identity([noo, noo])
        if mu > 0:
            id_reg_r = xerus.Tensor.identity(
                [Gi.dimensions[2], Gi.dimensions[2]])
            op_reg(i1, i2, i3, j1, j2, j3) << Gamma(i1, k1) * Gamma(
                k1, j1) * id_reg_r(i3, j3) * id_reg_p(i2, j2)
            op += w * w * op_reg
        if mu < d - 1:
            id_reg_l = xerus.Tensor.identity(
                [Gi.dimensions[0], Gi.dimensions[0]])
            op_reg(i1, i2, i3, j1, j2, j3) << Theta(i3, k1) * Theta(
                k1, j3) * id_reg_l(i1, j1) * id_reg_p(i2, j2)
            op += w * w * op_reg
        #if mu > 0 and mu < d - 1:
        #    op_reg(i1,i2,i3,j1,j2,j3) << Theta(i3,k1) * Theta(k1,j3) * Gamma(i1,k1) * Gamma(k1,j1)  * id_reg_p(i2,j2)
        #    op += w*w *w*w* op_reg

        op_arr = op.to_ndarray()
        rhs_arr = rhs.to_ndarray()
        gi_arr = Gi.to_ndarray()

        op_dim = op.dimensions
        op_arr_reshape = op_arr.reshape((op_dim[0] * op_dim[1] * op_dim[2],
                                         op_dim[3] * op_dim[4] * op_dim[5]))
        rhs_dim = rhs.dimensions
        rhs_arr_reshape = rhs_arr.reshape(
            (rhs_dim[0] * rhs_dim[1] * rhs_dim[2]))
        gi_dim = Gi.dimensions
        gi_arr_reshape = gi_arr.reshape((gi_dim[0] * gi_dim[1] * gi_dim[2]))

        sol_arr = np.linalg.solve(op_arr_reshape, rhs_arr_reshape)

        sol_arr_reshape = sol_arr.reshape((gi_dim[0], gi_dim[1], gi_dim[2]))
        sol = xerus.Tensor.from_ndarray(sol_arr_reshape)
        G.set_component(mu, sol)

        if mu != d - 1:
            Ax = xerus.Tensor()
            Ax(i2, i1) << op_pre(j1, j2, j3, i1, i2) * sol(j1, j2, j3)
            error = (Ax - Y).frob_norm() / Y.frob_norm()

            #print("mu=" + str(mu) +'\033[1m'+" e=" + str(error)  +'\033[0m'+ ' nG=' + str(G.frob_norm()) + ' sing ' + str(sing[-kmin]))
    return error