Example #1
def implicit_euler(A, x, stepSize, n):
    op = xe.TTOperator.identity(A.dimensions) - stepSize * A

    j, k = xe.indices(2)
    ourALS = xe.ALS_SPD
    ourALS.convergenceEpsilon = 1e-4
    ourALS.numHalfSweeps = 100

    results = [x]
    nextX = xe.TTTensor(x)

    for i in range(n):
        ourALS(op, nextX, x)

        # normalize
        norm = one_norm(nextX)
        nextX /= norm

        print("done itr", i,
              "residual:", xe.frob_norm(op(j/2,k/2)*nextX(k&0) - x(j&0)),
              "one-norm:", norm)

        x = xe.TTTensor(nextX)  # ensure it is a copy
        results.append(x)

    return results
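A minimal usage sketch for the snippet above. Everything below is illustrative: `one_norm` (which the loop relies on) is assumed to sum all entries by contracting with an all-ones TT tensor, `xe.TTTensor.ones` is assumed to be available in the Python bindings, and the operator is symmetrized so that `xe.ALS_SPD` is applicable.

import xerus as xe

i, j, k = xe.indices(3)

def one_norm(t):
    # assumed helper: sum of all entries, via contraction with the all-ones TT tensor
    return float(t(i&0) * xe.TTTensor.ones(t.dimensions)(i&0))

A = xe.TTOperator.random([2]*8, [2]*3)
A(i/2, j/2) << A(i/2, k/2) * A(j/2, k/2)  # make A symmetric positive semi-definite
x0 = xe.TTTensor.random([2]*4, [2]*3)
states = implicit_euler(A, x0, stepSize=1e-2, n=10)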
Example #2
def costs(_tt, _measures, _values, _alpha=50):
    """
    Compute Monte-Carlo estimates of the first moment E[f(T,•,•)] and the second moment E[f(T,•,•)²].

    Parameters
    ----------
    _tt : xe.TTTensor
        The tensor T for which the costs are computed.
    _measures, _values : np.ndarray
        The samples xₖ and yₖ used for Monte-Carlo estimation.
    _alpha : float
        Smoothness parameter for the smooth maximum function. (default: 50)

    Returns
    -------
    float, float
        The first and second moments.
    """
    _num_steps = _tt.order()
    assert len(_measures) == _num_steps
    _num_samples = _measures[0].shape[0]
    dimensions = _tt.dimensions
    for m in range(_num_steps):
        assert _measures[m].ndim == 2
        assert _measures[m].shape[0] == _num_samples
        assert _measures[m].shape[1] >= dimensions[m]
    assert _values.shape == (_num_samples, _num_steps + 1)

    assert isinstance(_measures, np.ndarray)
    measures = _measures.view()
    measures.flags.writeable = False
    values = _values.view()
    values.flags.writeable = False

    leftStack0 = [None] * (_num_steps + 1)
    leftStack0[0] = np.ones((1, ))
    leftStack0[0].flags.writeable = False
    for m in range(1, _num_steps + 1):
        leftStack0[m] = leftStack0[m - 1] @ _tt.get_component(
            m - 1).to_ndarray()[:, 0, :]
        leftStack0[m].flags.writeable = False

    rightStack = [None] * (_num_steps + 1)
    rightStack[_num_steps] = np.ones((1, ))
    rightStack[_num_steps].flags.writeable = False
    for m in reversed(range(_num_steps)):
        rightStack[m] = _tt.get_component(
            m).to_ndarray()[:, 0, :] @ rightStack[m + 1]
        rightStack[m].flags.writeable = False

    cpucount = cpu_count()
    chunksize = _num_samples // cpucount + (_num_samples % cpucount != 0)
    assert chunksize * cpucount >= _num_samples
    arguments = [(x * chunksize, min((x + 1) * chunksize, _num_samples),
                  xe.TTTensor(_tt), measures, values, _alpha, leftStack0,
                  rightStack) for x in range(cpucount)]
    with Pool() as p:
        maxDiffs = p.map(maxDiffSegments, arguments, chunksize=1)
    maxDiffs = np.sum(maxDiffs, axis=0) / _num_samples
    assert maxDiffs.shape == (2, )
    return maxDiffs
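A shape sketch for calling `costs` above; the data is synthetic and purely illustrative, and it assumes the parallel worker `maxDiffSegments` (together with `Pool` and `cpu_count` from `multiprocessing`) is available from the surrounding module.

import numpy as np
import xerus as xe

num_steps, num_samples, dim = 4, 1000, 3
tt = xe.TTTensor.random([dim] * num_steps, [2] * (num_steps - 1))
measures = np.random.randn(num_steps, num_samples, dim)  # one measure matrix per step
values = np.random.rand(num_samples, num_steps + 1)      # one value per sample and time step
first_moment, second_moment = costs(tt, measures, values)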
Example #3
 def to_TTTensor(self):
     # This is the tangent vector T on TT manifolds at point X:
     # Each tangent vector can be written as
     #     T = dX[0]*V[1]*...*V[d-1] + U[0]*dX[1]*V[2]*...*V[d-1] + ... + U[0]*...*U[d-2]*dX[d-1]
     # where the dX[k] are arbitrary component tensors and the U[j] and V[j] are the component tensors of the left- and right-canonicalized TT tensor X.
     # Note that the k-th summand is just X with the core moved to position k and the k-th component replaced by dX[k].
     # The rank of these tensors is bounded by 2*rank(X).
     # This can be seen by the following construction:
     #     T = [[U[0], dX[0]]] * [[U[1], dX[1]], [0, V[1]]] * ... * [[dX[d-1]], [V[d-1]]]
     # Using this construction we can write this as:
     ret = xe.TTTensor(self.dimensions)
     if self.order == 1:
         # ret = xe.TTTensor([self.dX[0].shape[1]])
         ret.set_component(0, xe.Tensor.from_buffer(self.dX[0]))
         return ret
     Um = np.asarray(self.space.U(0))
     dXm = self.dX[0]
     ret.set_component(0, xe.Tensor.from_buffer(np.block([[[dXm, Um]]])))
     for m in range(1, self.order - 1):
         Um = np.asarray(self.space.U(m))
         Vm = np.asarray(self.space.V(m))
         dXm = self.dX[m]
         Zm = np.zeros((Vm.shape[0], Um.shape[1], Um.shape[2]))
         ret.set_component(
             m, xe.Tensor.from_buffer(np.block([[[Vm, Zm]], [[dXm, Um]]])))
     Vm = np.asarray(self.space.V(self.order - 1))
     dXm = self.dX[self.order - 1]
     ret.set_component(self.order - 1,
                       xe.Tensor.from_buffer(np.block([[[Vm]], [[dXm]]])))
     ret.canonicalize_left()
     return ret
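A self-contained numpy check of the block construction sketched in the comments above (order 3, random cores). The identity T = dX[0]·V[1]·V[2] + U[0]·dX[1]·V[2] + U[0]·U[1]·dX[2] holds for arbitrary cores of matching ranks, so no xerus objects are needed for the check.

import numpy as np

def contract(cores):
    # contract a list of TT cores of shape (r_prev, n, r_next) into a full tensor
    res = cores[0]
    for c in cores[1:]:
        res = np.tensordot(res, c, axes=(-1, 0))
    return res[0, ..., 0]

n, r = 2, 3
U0, U1 = np.random.randn(1, n, r), np.random.randn(r, n, r)
V1, V2 = np.random.randn(r, n, r), np.random.randn(r, n, 1)
dX0, dX1, dX2 = np.random.randn(1, n, r), np.random.randn(r, n, r), np.random.randn(r, n, 1)

T_sum = contract([dX0, V1, V2]) + contract([U0, dX1, V2]) + contract([U0, U1, dX2])
T_blocks = contract([
    np.block([[[dX0, U0]]]),                               # first core:  [dX0, U0]
    np.block([[[V1, np.zeros((r, n, r))]], [[dX1, U1]]]),  # middle core: [[V1, 0], [dX1, U1]]
    np.block([[[V2]], [[dX2]]]),                           # last core:   [[V2], [dX2]]
])
assert np.allclose(T_blocks, T_sum)  # the block TT has ranks at most 2*r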
Example #4
        def coreProjection(_tt, _k):
            assert 0 <= _k < _tt.order()
            assert _tt.dimensions == tt.dimensions
            ret = xe.TTTensor(tt)
            ret.move_core(_k)

            leftContraction = np.ones((1, 1))
            for pos in range(_k):
                leftContraction = np.einsum(
                    'lk, ler, kes -> rs', leftContraction,
                    ret.get_component(pos).to_ndarray(),
                    _tt.get_component(pos).to_ndarray())

            rightContraction = np.ones((1, 1))
            for pos in reversed(range(_k + 1, tt.order())):
                rightContraction = np.einsum(
                    'ler, kes, rs -> lk',
                    ret.get_component(pos).to_ndarray(),
                    _tt.get_component(pos).to_ndarray(), rightContraction)

            core = np.einsum('lk, kes, rs -> ler', leftContraction,
                             _tt.get_component(_k).to_ndarray(),
                             rightContraction)
            ret.set_component(_k, xe.Tensor.from_buffer(core))
            return ret
Example #5
def costs_component_gradient_fd(_tt, _mode, _measures, _values, _h=1e-8):
    val0 = costs(_tt, _measures, _values)[0]
    test = xe.TTTensor(_tt)
    ret = xe.Tensor(_tt.get_component(_mode).dimensions)
    for I in range(ret.size):
        testCore = xe.Tensor(_tt.get_component(_mode))
        testCore[I] += _h
        test.set_component(_mode, testCore)
        valI = costs(test, _measures, _values)[0]
        ret[I] = (valI - val0) / _h
    return ret.to_ndarray()
Example #6
def costs_riemannian_gradient(_tt, _measures, _values):
    """
    Compute the Riemannian gradient of the cost functional.

    Parameters
    ----------
    _tt : xe.TTTensor
        The point at which the gradient is to be computed.
    _measures, _values : np.ndarray
        The samples used for Monte-Carlo estimation.

    Returns
    -------
    TangentVector
        The gradient.
    """

    #TODO: This can be done faster by storing Us, Vs and Xs and building the TangentSpace manually.
    gradient = xe.TTTensor(_tt.dimensions)
    _tt.move_core(0)
    for m in range(_tt.order()):
        core = costs_component_gradient(_tt, m, _measures, _values)
        cg = xe.TTTensor(_tt)
        if m < _tt.order() - 1:
            _tt.move_core(m + 1)
            Um = _tt.get_component(m).to_ndarray()
            core -= np.einsum('lex, yzx, yzr -> ler', Um, Um, core)
        cg.set_component(m, xe.Tensor.from_buffer(core))
        gradient = gradient + cg

    ts = TangentSpace(_tt)
    tv = ts.project(gradient)
    assert xe.frob_norm(tv.to_TTTensor() -
                        gradient) <= 1e-12 * xe.frob_norm(gradient)

    # project out the part in direction [0, ..., 0]
    tp = ts.project(xe.TTTensor.dirac(_tt.dimensions, [0] * _tt.order()))
    tv = (tv - (tv @ tp) / tp.norm()**2 * tp)
    return tv
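A short usage sketch, reusing the synthetic `tt`, `measures` and `values` from the `costs` sketch above and assuming `TangentSpace` and `costs_component_gradient` are importable from the surrounding module.

grad = costs_riemannian_gradient(tt, measures, values)
print(f"relative gradient norm: {grad.norm() / xe.frob_norm(tt):.2e}")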
Example #7
File: als.py Project: lljbash/xerus
			
			
			# right -> left, only move core and update stack
			self.x.move_core(0, True)
			for pos in reversed(range(1,self.d)) :
				self.push_right_stack(pos)
				self.leftAStack.pop()
				self.leftBStack.pop()


def simpleALS(A, x, b) :
	solver = InternalSolver(A, x, b)
	solver.solve()

if __name__ == "__main__":
	i,j,k = xe.indices(3)
	
	A = xe.TTOperator.random([4]*16, [2]*7)
	A(i/2,j/2) << A(i/2, k/2) * A(j/2, k/2)
	
	solution = xe.TTTensor.random([4]*8, [3]*7)
	b = xe.TTTensor()
	b(i&0) << A(i/2, j/2) * solution(j&0)
	
	x = xe.TTTensor.random([4]*8, [3]*7)
	simpleALS(A, x, b)
	
	print("Residual:", xe.frob_norm(A(i/2, j/2) * x(j&0) - b(i&0))/xe.frob_norm(b))
	print("Error:", xe.frob_norm(solution-x)/xe.frob_norm(x))
	
Example #8
def construct_exact_fermit_pasta_single_TT(noo, p, beta):
    """Create the exact solution of the Fermi-Pasta-Ulam-Tsingou (FPUT) problem in single TT format
    in the monomial basis; for other basis functions this needs to be transformed!

    Parameters
    ----------
    noo: int
        number of dimensions
    p: int
        number of basis functions
    beta: float
        coefficient of the FPUT equation

    Returns
    -------
    Solution: xerus.TTTensor
        Exact solution of the FPUT problem in the monomial basis
    """
    dim = [p for i in range(0, noo)]
    dim.append(noo)
    Solution = xerus.TTTensor(dim)

    tmp = xerus.Tensor([1, 4, 4 * noo])
    for eq in range(noo):
        tmp[0, 0, 4 * eq] = 1
    tmp[0, 0, 0] = 0
    tmp[0, 1, 0] = -2
    tmp[0, 3, 0] = -2 * beta
    tmp[0, 0, 1] = 1
    tmp[0, 2, 1] = 3 * beta
    tmp[0, 1, 2] = -3 * beta
    tmp[0, 0, 3] = beta

    tmp[0, 0, 4] = 1
    tmp[0, 1, 5] = 1
    tmp[0, 2, 6] = 1
    tmp[0, 3, 7] = 1
    Solution.set_component(0, tmp)

    for comp in range(1, Solution.order() - 1):
        tmp = xerus.Tensor([4 * noo, 4, 4 * noo])
        for eq in range(noo):
            tmp[4 * eq, 0, 4 * eq] = 1
        if (comp + 1) * 4 < 4 * noo:
            tmp[4 * (comp + 1), 0, 4 * (comp + 1)] = 1
            tmp[4 * (comp + 1), 1, 4 * (comp + 1) + 1] = 1
            tmp[4 * (comp + 1), 2, 4 * (comp + 1) + 2] = 1
            tmp[4 * (comp + 1), 3, 4 * (comp + 1) + 3] = 1

        tmp[4 * comp, 0, 4 * comp] = 0
        tmp[4 * comp, 1, 4 * comp] = -2
        tmp[4 * comp, 3, 4 * comp] = -2 * beta
        tmp[4 * comp + 1, 0, 4 * comp] = 1
        tmp[4 * comp + 1, 2, 4 * comp] = 3 * beta
        tmp[4 * comp + 2, 1, 4 * comp] = -3 * beta
        tmp[4 * comp + 3, 0, 4 * comp] = beta
        tmp[4 * comp, 0, 4 * comp + 1] = 1
        tmp[4 * comp, 2, 4 * comp + 1] = 3 * beta
        tmp[4 * comp, 1, 4 * comp + 2] = -3 * beta
        tmp[4 * comp, 0, 4 * comp + 3] = beta

        tmp[4 * (comp - 1), 0, 4 * (comp - 1)] = 1
        tmp[4 * (comp - 1) + 1, 1, 4 * (comp - 1)] = 1
        tmp[4 * (comp - 1) + 2, 2, 4 * (comp - 1)] = 1
        tmp[4 * (comp - 1) + 3, 3, 4 * (comp - 1)] = 1

        Solution.set_component(comp, tmp)

    tmp = xerus.Tensor([4 * noo, noo, 1])
    for eq in range(noo):
        tmp[4 * eq, eq, 0] = 1
    Solution.set_component(Solution.order() - 1, tmp)
    Solution.round(0.0)
    return Solution
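A quick sanity sketch for the constructor above. The parameter values are illustrative; note that `p` must be 4 here because the construction hard-codes component dimension 4.

import xerus

noo, p, beta = 8, 4, 0.7
Solution = construct_exact_fermit_pasta_single_TT(noo, p, beta)
print(Solution.dimensions)  # [4]*noo + [noo]: noo modes of size p plus one equation-selector mode
print(Solution.ranks())     # TT-ranks after rounding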
Example #9
 def __init__(self, _tt):
     self._U = xe.TTTensor(_tt)
     self._U.move_core(_tt.order() - 1)
     self._V = xe.TTTensor(_tt)
     self._V.move_core(0)
Example #10
def compute_and_cache_solution(params, maxIter=100):
    trainingSet = slice(0, params.num_training_samples, 1)
    validationSet = slice(
        params.num_training_samples,
        params.num_training_samples + params.num_validation_samples, 1)

    sol = compute_and_cache_measures_and_values(params)
    measures, values = sol.value
    time = sol.time

    sol = compute_and_cache_initial_guess(params, maxIter)
    tt = sol.value
    time += sol.time

    ranks = [params.chaos_rank] * (params.num_exercise_dates - 2)

    # Define convenience functions.
    def training_costs(_tt):
        return costs(_tt, measures[:, trainingSet], values[trainingSet])[0]

    def training_costs_gradient(_tt):
        return costs_riemannian_gradient(_tt, measures[:, trainingSet],
                                         values[trainingSet])

    def validation_costs(_tt):
        return costs(_tt,
                     measures[:, validationSet],
                     values[validationSet],
                     _alpha=1e3)[0]

    # compute_descentDir = lambda curGrad, prevGrad: curGrad  # GD
    # compute_descentDir = lambda curGrad, prevGrad: 0.5*(curGrad + prevGrad)  # Momentum
    def compute_descentDir(curGrad, prevGrad):  # nonlinear CG update
        """
        Nonlinear CG update.

        The Hestenes-Stiefel (HS) update can be derived by demanding that consecutive search directions be conjugate
        with respect to the average Hessian over the line segment [x_k, x_{k+1}].
        Even though it is a natural choice, it is not easy to implement on manifolds.
        The Polak-Ribière (PR) update is similar to HS, both in terms of theoretical convergence properties and practical performance.
        For PR, however, the strong Wolfe conditions do not guarantee that the computed update direction
        is always a descent direction. To guarantee this we modify PR to PR+, which also provides an automatic direction reset [2].
        Finally, global convergence can be guaranteed for every parameter that is bounded in absolute value by the Fletcher-Reeves (FR) update.
        This leads to the final update rule min{PR+, FR}.
        To ensure that a descent direction is returned even with Armijo updates, we check that the computed update direction
        does not point in the opposite direction to the gradient.

        References
        ----------
          - [1] Numerical Optimization (Jorge Nocedal and Stephen J. Wright)
          - [2] An Introduction to the Conjugate Gradient Method Without the Agonizing Pain (Jonathan Richard Shewchuk)
        """
        gradDiff = curGrad - prevGrad
        betaPR = (curGrad @ gradDiff) / (curGrad @ curGrad)  # Polak-Ribière update
        beta = max(betaPR, 0)  # PR+ update
        betaFR = (curGrad @ curGrad) / (prevGrad @ prevGrad)  # Fletcher-Reeves update
        beta = min(beta, betaFR)  # min{PR+,FR} update
        descentDir = curGrad + beta * prevGrad
        if descentDir @ curGrad < 1e-3 * descentDir.norm() * curGrad.norm():
            print("WARNING: Computed descent direction opposite to gradient.")
            descentDir = curGrad
        return descentDir

    print("=" * 80)
    print("  Perform gradient descent")
    print("=" * 80)
    tic = process_time()
    trnCosts = training_costs(tt)
    valCosts = deque(maxlen=10)
    grad = training_costs_gradient(tt)
    print(
        f"[0] Training costs: {trnCosts: .4e}  |  Validation costs: {validation_costs(tt): .4e}  |  Best validation costs: {np.nan: .4e}  |  Relative gradient norm: {grad.norm()/xe.frob_norm(tt):.2e}  |  Relative update norm: {np.nan:.2e}  |  Step size: {np.nan:.2e}  |  Relative retraction error: {np.nan:.2e}  |  Ranks: {tt.ranks()}"
    )
    ss = 1
    descentDir = grad
    descentDirGrad = descentDir @ grad
    bestValCosts = np.inf
    bestTT = None
    for iteration in range(maxIter):
        if grad.norm() < 1e-6 * xe.frob_norm(tt):
            print(
                "Termination: relative norm of gradient deceeds tolerance (local minimum reached)"
            )
            break
        prev_tt = tt
        tt, re, ss = armijo_step(retraction(tt,
                                            descentDir,
                                            _roundingParameter=ranks),
                                 training_costs,
                                 descentDirGrad,
                                 _initialStepSize=ss)
        trnCosts = training_costs(tt)
        valCosts.append(validation_costs(tt))
        if valCosts[-1] < bestValCosts:
            bestTT = xe.TTTensor(tt)
            bestValCosts = valCosts[-1]
        print(
            f"[{iteration+1}] Training costs: {trnCosts: .4e}  |  Validation costs: {valCosts[-1]: .4e}  |  Best validation costs: {bestValCosts: .4e}  |  Relative gradient norm: {grad.norm()/np.asarray(xe.frob_norm(prev_tt)):.2e}  |  Relative update norm: {xe.frob_norm(prev_tt-tt)/np.asarray(xe.frob_norm(prev_tt)):.2e}  |  Step size: {ss:.2e}  |  Relative retraction error: {re:.2e}  |  Ranks: {tt.ranks()}"
        )
        if len(valCosts) == 10 and (valCosts[0] -
                                    valCosts[-1]) < 1e-2 * valCosts[0]:
            print("Termination: decrease of costs deceeds tolerance")
            break
        if iteration < maxIter - 1:
            prev_grad = TangentSpace(tt).project(grad)
            grad = training_costs_gradient(tt)
            descentDir = compute_descentDir(grad, prev_grad)
            descentDirGrad = descentDir @ grad
    else:
        print("Termination: maximum number of iterations reached")

    assert bestTT is not None
    return bestTT
Example #11
def compute_and_cache_initial_guess(params, maxIter=100):
    # order = params.num_exercise_dates-1
    # dimension = comb(params.chaos_degree+params.num_assets, params.num_assets)
    # tt = xe.TTTensor.random([dimension]*order, [params.chaos_rank]*(order-1))
    # return 1e-6*tt/xe.frob_norm(tt)

    # measures, values = compute_and_cache_measures_and_values(params).value
    # num_steps, num_samples, chaos_dimension = measures.shape
    # assert values.shape == (num_samples, num_steps+1)

    # values = values[:,-1:]  # just take the last value
    # values = [xe.Tensor.from_ndarray(val) for val in values]
    # measures = np.transpose(measures, axes=(1,0,2))
    # measures = [[xe.Tensor.from_ndarray(cmp_m) for cmp_m in m] for m in measures]

    # tt = xe.uq_ra_adf(measures, values, [1]+[chaos_dimension]*num_steps, targeteps=1e-6, maxitr=maxIter)
    # tt.fix_mode(0,0)  # remove the physical dimension (1) from reconstruction
    # tt.round(1e-6)    # remove unnecessary ranks
    # return tt

    order = params.num_exercise_dates - 1
    assert params.chaos_degree >= 1
    assert order >= 1

    if params.chaos_degree == 1:
        return xe.TTTensor([1 + params.num_assets] * order)

    paramsRec = params.copy()
    paramsRec.chaos_degree -= 1
    ttRec = compute_and_cache_solution(paramsRec, maxIter).value

    dimension = comb(params.chaos_degree + params.num_assets,
                     params.num_assets)
    dimensionRec = comb(paramsRec.chaos_degree + paramsRec.num_assets,
                        paramsRec.num_assets)
    assert ttRec.dimensions == [dimensionRec] * order

    # multi-indices for polynomials of degree params.chaos_degree
    mIs = list(multiIndices(params.chaos_degree, params.num_assets))
    # multi-indices for polynomials of degree params.chaos_degree-1
    mIRecs = list(multiIndices(paramsRec.chaos_degree, params.num_assets))
    assert len(mIRecs) == dimensionRec and len(mIs) == dimension

    tt = xe.TTTensor([dimension] * order)
    for m in range(order):
        coreRec = ttRec.get_component(m).to_ndarray()
        assert coreRec.shape[1] == dimensionRec
        core = np.empty((coreRec.shape[0], dimension, coreRec.shape[2]))
        iRec = 0
        for i, mI in enumerate(mIs):
            if sum(mI) < params.chaos_degree:  # mI is also an element of mIRecs
                # ensure that the elements of mIRecs appear in mIs in the same order as in mIRecs
                assert mIRecs[iRec] == mI
                core[:, i, :] = coreRec[:, iRec, :]
                iRec += 1
            else:  # mI is a new multi-index
                core[:, i, :] = 0
        assert iRec == dimensionRec  # ensure that every multi-index was used
        tt.set_component(m, xe.Tensor.from_ndarray(core))
    tt.move_core(0)

    # #TODO: kick out of local minimum
    # rk = tt.ranks()
    # kick = xe.TTTensor.random(tt.dimensions, rk)
    # tt = tt + 0.1*xe.frob_norm(tt)/xe.frob_norm(kick)*kick
    # tt.round(rk)

    return tt
Example #12
 def step(_stepSize):
     trial = (basePoint - _stepSize * _tv).to_TTTensor()
     tmp = xe.TTTensor(trial)
     trial.round(_roundingParameter)
     return trial, xe.frob_norm(tmp - trial) / (_stepSize * tv_norm)
Example #13
def costs_component_gradient(_tt, _mode, _measures, _values, _alpha=50):
    """
    Compute the gradient of the cost functional with respect to the `_mode`-th component tensor.

    Parameters
    ----------
    _tt : xe.TTTensor
        The point at which the gradient is to be computed.
    _mode : int
        The index of the component tensor with respect to which the gradient is to be computed.
    _measures, _values : np.ndarray
        The samples used for Monte-Carlo estimation.
    _alpha : float
        Smoothness parameter for the smooth maximum function. (default: 50)

    Returns
    -------
    np.ndarray
        The gradient.

    Notes
    -----
        The correctness of this function has been verified via a Taylor test.
    """
    _num_steps = _tt.order()
    assert len(_measures) == _num_steps
    _num_samples = _measures[0].shape[0]  # num_samples
    dimensions = _tt.dimensions
    for m in range(_num_steps):
        assert _measures[m].ndim == 2
        assert _measures[m].shape[0] == _num_samples
        assert _measures[m].shape[1] >= dimensions[m]
    assert _values.shape == (_num_samples, _num_steps + 1)
    assert 0 <= _mode < _num_steps

    assert isinstance(_measures, np.ndarray)
    measures = _measures.view()
    measures.flags.writeable = False
    values = _values.view()
    values.flags.writeable = False

    leftStack0 = [None] * (_num_steps + 1)
    leftStack0[0] = np.ones((1, ))
    leftStack0[0].flags.writeable = False
    for m in range(1, _num_steps + 1):
        leftStack0[m] = leftStack0[m - 1] @ _tt.get_component(
            m - 1).to_ndarray()[:, 0, :]
        leftStack0[m].flags.writeable = False

    rightStack = [None] * (_num_steps + 1)
    rightStack[_num_steps] = np.ones((1, ))
    rightStack[_num_steps].flags.writeable = False
    for m in reversed(range(_num_steps)):
        rightStack[m] = _tt.get_component(
            m).to_ndarray()[:, 0, :] @ rightStack[m + 1]
        rightStack[m].flags.writeable = False

    cpucount = cpu_count()
    chunksize = _num_samples // cpucount + (_num_samples % cpucount != 0)
    assert chunksize * cpucount >= _num_samples
    arguments = [(x * chunksize, min((x + 1) * chunksize, _num_samples),
                  xe.TTTensor(_tt), _mode, measures, values, _alpha,
                  leftStack0, rightStack) for x in range(cpucount)]
    with Pool() as p:
        maxDiffGrad = p.map(maxDiffGradSegments, arguments, chunksize=1)
    maxDiffGrad = np.sum(maxDiffGrad, axis=0) / _num_samples
    assert maxDiffGrad.shape == tuple(_tt.get_component(_mode).dimensions)
    return maxDiffGrad
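A consistency sketch relating this gradient to the finite-difference version `costs_component_gradient_fd` from the earlier example; it reuses the synthetic `tt`, `measures` and `values` from the `costs` sketch and assumes both functions and their parallel workers are importable. The relative error should be on the order of the finite-difference step `_h`.

grad = costs_component_gradient(tt, 0, measures, values)
grad_fd = costs_component_gradient_fd(tt, 0, measures, values)
rel_err = np.linalg.norm(grad - grad_fd) / np.linalg.norm(grad)
print(f"relative FD error in mode 0: {rel_err:.2e}")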
Example #14
        print("Tangent test:")
        print("-------------")

        num_samples = 20000  # number of samples used for the Monte-Carlo integration
        dimensions = [3] * 4  # dimensions of the coefficient tensor
        ranks = [3] * (len(dimensions) - 1)  # TT-ranks of the coefficient tensor
        print(f"Number of samples: {num_samples}")
        print(f"Dimensions:        {dimensions}")
        print(f"Ranks:             {ranks}")
        print()

        # Define a random initial value.
        tt = xe.TTTensor.random(dimensions, ranks)
        componentGradients = []
        gradient = xe.TTTensor(tt.dimensions)
        tt.move_core(0)
        for m in range(tt.order()):
            g = xe.TTTensor(tt)
            pg = xe.TTTensor(tt)
            assert g.corePosition == m
            core = np.random.randn(*tt.get_component(m).dimensions)
            g.set_component(m, xe.Tensor.from_buffer(core))
            componentGradients.append(g)
            if m < tt.order() - 1:
                tt.move_core(m + 1)
            U = tt.get_component(m).to_ndarray()
            core -= np.einsum('lex, yzx, yzr -> ler', U, U, core)
            pg.set_component(m, xe.Tensor.from_buffer(core))
            gradient = gradient + pg