# Example #1

import numpy as np

import tensap


class LinearModelLearningSquareLoss(tensap.LinearModelLearning):

    def __init__(self, weights=None):
        '''
        Constructor for the class LinearModelLearningSquareLoss.

        When approximating a vector-valued function, setting shared_coefficients
        to False will independently compute y.shape[1] sets of coefficients,
        whereas setting it to True will compute one set of coefficients shared
        across all the outputs. In that case, basis_eval should be an array of
        shape (n, N, D), with n the size of the dataset, N the number of
        basis functions and D the number of outputs.

        Parameters
        ----------
        weights : list or numpy.ndarray, optional
            The arrays for the weighted least-squares minimization. The default
            is None.

        Returns
        -------
        None.

        '''
        super().__init__(tensap.SquareLossFunction())
        self.weights = weights
        self.linear_solver = 'qr'
        self.shared_coefficients = True
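

# A minimal usage sketch of shared_coefficients (assumptions: the attributes
# basis_eval and training_data and the method solve() are inherited from
# LinearModelLearning, and training_data = [None, y] supplies the outputs
# when basis evaluations are given directly; call the function to run it).
def _example_shared_coefficients():
    n, N, D = 100, 6, 3                   # dataset size, basis size, outputs
    x = np.random.randn(n)
    y = np.column_stack([x ** (k + 1) for k in range(D)])  # vector-valued data
    solver = tensap.LinearModelLearningSquareLoss()
    solver.shared_coefficients = False    # one set of coefficients per output
    solver.basis_eval = np.vander(x, N)   # n-by-N matrix of basis evaluations
    solver.training_data = [None, y]
    coef, output = solver.solve()         # assumed to return (solution, output)
    return coef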


class LinearModelLearningDensityL2(tensap.LinearModelLearning):

    def __init__(self):
        '''
        Constructor for the class LinearModelLearningDensityL2.

        Returns
        -------
        None.

        '''
        super().__init__(tensap.SquareLossFunction())

        self.is_basis_orthonormal = True
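
# %% Function to approximate
# The definitions of the target function and of the input random vector are
# not part of this excerpt; the minimal setup below is an assumption,
# modeled on the tensap tutorials, so that the rest of the script is
# self-contained.
ORDER = 5
X = tensap.RandomVector(tensap.NormalRandomVariable(), ORDER)


def fun(x):
    # Hypothetical stand-in for the original target function
    return 1 / (10 + x[:, 0] + 0.5 * x[:, 1]) ** 2
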
# %% Approximation basis
DEGREE = 8  # assumed polynomial degree; the value is not shown in the excerpt
BASES = [tensap.PolynomialFunctionalBasis(X_TRAIN.orthonormal_polynomials(),
                                          range(DEGREE + 1))
         for X_TRAIN in X.random_variables]
BASES = tensap.FunctionalBases(BASES)

# %% Training and test samples
NUM_TRAIN = 1000
X_TRAIN = X.random(NUM_TRAIN)
Y_TRAIN = fun(X_TRAIN)

NUM_TEST = 10000
X_TEST = X.random(NUM_TEST)
Y_TEST = fun(X_TEST)

# %% Learning in canonical tensor format
SOLVER = tensap.CanonicalTensorLearning(ORDER, tensap.SquareLossFunction())
SOLVER.rank_adaptation = True
SOLVER.initialization_type = 'mean'
SOLVER.tolerance['on_error'] = 1e-6
SOLVER.alternating_minimization_parameters['stagnation'] = 1e-8
SOLVER.alternating_minimization_parameters['max_iterations'] = 100
SOLVER.linear_model_learning.regularization = False
SOLVER.linear_model_learning.basis_adaptation = True
SOLVER.bases = BASES
SOLVER.training_data = [X_TRAIN, Y_TRAIN]
SOLVER.display = True
SOLVER.alternating_minimization_parameters['display'] = False
SOLVER.test_error = True
SOLVER.test_data = [X_TEST, Y_TEST]
SOLVER.alternating_minimization_parameters['one_by_one_factor'] = False
SOLVER.alternating_minimization_parameters['inner_loops'] = 2

F, OUTPUT = SOLVER.solve()
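
# The test error can then be evaluated on the test sample (a sketch; it
# assumes the loss-function method test_error(f, sample) used in the tensap
# tutorials).
TEST_ERROR = SOLVER.loss_function.test_error(F, [X_TEST, Y_TEST])
print('Canonical format: test error = %2.5e' % TEST_ERROR)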

# %% Tree-based tensor format
# Tensor format
# 1 - Random tree and active nodes
# 2 - Tensor-Train
# 3 - Hierarchical Tensor-Train
# 4 - Binary tree
CHOICE = 3
if CHOICE == 1:
    print('Random tree with active nodes')
    ARITY = [2, 4]
    TREE = tensap.DimensionTree.random(ORDER, ARITY)
    IS_ACTIVE_NODE = np.full(TREE.nb_nodes, True)
    SOLVER = tensap.TreeBasedTensorLearning(TREE, IS_ACTIVE_NODE,
                                            tensap.SquareLossFunction())
elif CHOICE == 2:
    print('Tensor-train format')
    SOLVER = tensap.TreeBasedTensorLearning.tensor_train(
        ORDER, tensap.SquareLossFunction())
elif CHOICE == 3:
    print('Tensor Train Tucker')
    SOLVER = tensap.TreeBasedTensorLearning.tensor_train_tucker(
        ORDER, tensap.SquareLossFunction())
elif CHOICE == 4:
    print('Binary tree')
    TREE = tensap.DimensionTree.balanced(ORDER)
    IS_ACTIVE_NODE = np.full(TREE.nb_nodes, True)
    SOLVER = tensap.TreeBasedTensorLearning(TREE, IS_ACTIVE_NODE,
                                            tensap.SquareLossFunction())
else:
    raise ValueError('Bad choice of tensor format')
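
# As with the canonical format above, the solver is then given the bases and
# the training data and run (a sketch mirroring the canonical configuration
# earlier in this script; the attribute names are the ones already used
# there).
SOLVER.bases = BASES
SOLVER.training_data = [X_TRAIN, Y_TRAIN]
SOLVER.rank_adaptation = True
SOLVER.tolerance['on_error'] = 1e-6
SOLVER.test_error = True
SOLVER.test_data = [X_TEST, Y_TEST]

F, OUTPUT = SOLVER.solve()
TEST_ERROR = SOLVER.loss_function.test_error(F, [X_TEST, Y_TEST])
print('Tree-based format: test error = %2.5e' % TEST_ERROR)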