Example #1
0
    def test_invalid_custom_init_values(self):
        """Custom initialization must be rejected when the supplied
        factors or core are missing or invalid."""
        tensor = self.init_by_product_tensor
        ranks = [30, 10, 10]

        # Only two factors for a 3-mode tensor: not enough.
        # NOTE: the "Engouh" spelling matches the exception name
        # declared in the library's errors module.
        with self.assertRaises(err.CustomNotEngouhFactors):
            ntd(tensor, ranks,
                init="custom",
                factors_0=[self.factors_0, self.factors_1],
                return_costs=True,
                verbose=False,
                sparsity_coefficients=[None, None, None, None],
                normalize=[False, False, False, False])

        # Right count of factors, but one of them is None: invalid.
        with self.assertRaises(err.CustomNotValidFactors):
            ntd(tensor, ranks,
                init="custom",
                factors_0=[self.factors_0, self.factors_1, None],
                return_costs=True,
                verbose=False,
                sparsity_coefficients=[None, None, None, None],
                normalize=[False, False, False, False])

        # Valid factors but a None core: invalid.
        with self.assertRaises(err.CustomNotValidCore):
            ntd(tensor, ranks,
                init="custom",
                factors_0=[self.factors_0, self.factors_1, self.factors_2],
                core_0=None,
                return_costs=True,
                verbose=False,
                sparsity_coefficients=[None, None, None, None],
                normalize=[False, False, False, False])
Example #2
0
 def test_invalid_init_values(self):
     """An unrecognized `init` keyword must raise
     InvalidInitializationType."""
     with self.assertRaises(err.InvalidInitializationType):
         ntd(self.init_by_product_tensor,
             [30, 10, 10],
             init="string",
             return_costs=True,
             verbose=False,
             sparsity_coefficients=[None] * 4,
             normalize=[False] * 4)
Example #3
0
 def test_invalid_ranks_values(self):
     """Passing fewer ranks than tensor modes must raise
     InvalidRanksException."""
     too_few_ranks = [30, 10]  # tensor has 3 modes
     with self.assertRaises(err.InvalidRanksException):
         ntd(self.init_by_product_tensor,
             too_few_ranks,
             init="random",
             return_costs=True,
             verbose=False,
             sparsity_coefficients=[None] * 4,
             normalize=[False] * 4)
Example #4
0
 def test_good_random_decomp(self):
     """With random init, the decomposition of an exactly low-rank tensor
     should reach near-zero error with strictly decreasing costs."""
     _, _, costs, _ = ntd(
         self.init_by_product_tensor,
         self.random_ranks,
         init="random",
         return_costs=True,
         verbose=False,
         sparsity_coefficients=[None] * 4,
         normalize=[False] * 4)
     # Final reconstruction error is ~0 (tensor was built from the factors).
     self.assertAlmostEqual(costs[-1], 0, places=2)
     # Cost must decrease monotonically across iterations.
     self.assertTrue(self.strictly_decreasing(costs))
Example #5
0
 def test_good_custom_decomp(self):
     """Starting from the true factors/core (custom init) on a noisy
     tensor, errors should still shrink to ~0 and decrease strictly."""
     noisy_tensor = (self.init_by_product_tensor
                     + np.random.random(self.random_shape_tens))
     _, _, costs, _ = ntd(
         noisy_tensor,
         self.random_ranks,
         init="custom",
         factors_0=[self.factors_0, self.factors_1, self.factors_2],
         core_0=self.core,
         return_costs=True,
         verbose=False,
         sparsity_coefficients=[None] * 4,
         normalize=[False] * 4)
     self.assertAlmostEqual(costs[-1], 0, places=2)
     self.assertTrue(self.strictly_decreasing(costs))
Example #6
0
    def preserve_error_n_iterations(
            self,
            tensor,
            ranks,
            nb_iter=2,
            n_iter_ntd=100,
            init="random",
            sparsity_coefficients=None,
            normalize=None,
            deterministic=True):
        """Check that repeated NTD runs reproduce the same final cost.

        Runs the decomposition ``nb_iter`` times with identical settings
        and returns True iff every run ends at exactly the same final cost
        as the first run (useful to validate the ``deterministic`` flag).

        Parameters
        ----------
        tensor : tensorly tensor
            The tensor to decompose.
        ranks : list of integers
            Ranks of the decomposition.
        nb_iter : int, optional
            Number of NTD runs to compare. The default is 2.
        n_iter_ntd : int, optional
            Maximum number of iterations for each NTD run. The default is 100.
        init : String, optional
            Initialization type forwarded to ntd. The default is "random".
        sparsity_coefficients : list, optional
            Sparsity coefficients forwarded to ntd.
            Defaults to [None, None, None, None].
        normalize : list of booleans, optional
            Normalization flags forwarded to ntd.
            Defaults to [False, False, False, False].
        deterministic : boolean, optional
            Whether ntd should run deterministically. The default is True.

        Returns
        -------
        boolean
            True if all runs share the first run's final cost, else False.
        """
        # Fix: avoid mutable default arguments — build fresh lists per call
        # instead of sharing one list object across invocations.
        if sparsity_coefficients is None:
            sparsity_coefficients = [None, None, None, None]
        if normalize is None:
            normalize = [False, False, False, False]

        def _run_ntd():
            # One NTD run with the shared settings; with return_costs=True
            # the result is (core, factors, costs, times).
            return ntd(tensor,
                       ranks,
                       init=init,
                       n_iter_max=n_iter_ntd,
                       tol=1e-6,
                       sparsity_coefficients=sparsity_coefficients,
                       fixed_modes=[],
                       normalize=normalize,
                       verbose=False,
                       return_costs=True,
                       deterministic=deterministic)

        # Final cost of the reference run (index 2 is the costs list).
        reference_cost = _run_ntd()[2][-1]
        for _ in range(nb_iter - 1):
            if _run_ntd()[2][-1] != reference_cost:
                return False
        return True
Example #7
0
def NTD_decomp_as_script(persisted_path,
                         persisted_arguments,
                         tensor_spectrogram,
                         ranks,
                         init="chromas"):
    """
    Computes the NTD from the tensor_spectrogram and with specified ranks.
    On the first hand, if the NTD is persisted, it will load and return its results.
    If it's not, it will compute the NTD, store it, and return it.

    Parameters
    ----------
    persisted_path : String
        Path of the persisted decompositions and bars.
    persisted_arguments : String
        Identifier of the specific NTD to load/save.
    tensor_spectrogram : tensorly tensor
        The tensor to decompose.
    ranks : list of integers
        Ranks of the decomposition.
    init : String, optional
        The type of initialization of the NTD.
        See the NTD module to have more information regarding initialization.
        The default is "chromas",
        meaning that the first factor will be set to the 12-size identity matrix,
        and the other factors will be initialized by HOSVD.

    Raises
    ------
    NotImplementedError
        Errors in the arguments.

    Returns
    -------
    core : tensorly tensor
        The core of the decomposition.
    factors : numpy array
        The factors of the decomposition.

    """
    # Fix: use forward slashes instead of hard-coded backslashes.
    # Windows accepts "/" as a separator, while "\\" breaks on POSIX
    # systems; this also matches the other persistence helpers.
    path_for_ntd = "{}/ntd/{}_{}_{}".format(persisted_path, ranks[0],
                                            ranks[1], ranks[2])
    # Guard against stale hop_length identifiers leaking into the cache key.
    if "512" in persisted_arguments:
        raise NotImplementedError(
            "Probably an error in the code, as old hop_length seems to be passed"
        )
    if persisted_arguments[-2:] == "32":
        raise NotImplementedError(
            "Probably an error in the code, as the hop_length seems to be passed"
        )
    try:
        # Fast path: a persisted decomposition exists — load and return it.
        a_core_path = "{}/core{}.npy".format(path_for_ntd,
                                             persisted_arguments)
        a_core = np.load(a_core_path)
        a_factor_path = "{}/factors{}.npy".format(path_for_ntd,
                                                  persisted_arguments)
        # allow_pickle: factors are stored as an object array of matrices.
        a_factor = np.load(a_factor_path, allow_pickle=True)
        return a_core, a_factor
    except FileNotFoundError:
        # Slow path: compute the NTD, persist it, then return it.
        core, factors = NTD.ntd(tensor_spectrogram,
                                ranks=ranks,
                                init=init,
                                verbose=False,
                                hals=False,
                                sparsity_coefficients=[None, None, None, None],
                                normalize=[True, True, False, True],
                                mode_core_norm=2,
                                deterministic=True)

        pathlib.Path(path_for_ntd).mkdir(parents=True, exist_ok=True)

        # np.save appends the ".npy" suffix itself.
        core_path = "{}/core{}".format(path_for_ntd, persisted_arguments)
        np.save(core_path, core)
        factors_path = "{}/factors{}".format(path_for_ntd,
                                             persisted_arguments)
        np.save(factors_path, factors)
        return core, factors
Example #8
0
def NTD_decomp_as_script(persisted_path,
                         persisted_arguments,
                         tensor_spectrogram,
                         ranks,
                         init="chromas",
                         update_rule="hals",
                         beta=None):
    """
    Computes the NTD from the tensor_spectrogram and with specified ranks.
    On the first hand, if the NTD is persisted, it will load and return its results.
    If it's not, it will compute the NTD, store it, and return it.

    Parameters
    ----------
    persisted_path : String
        Path of the persisted decompositions and bars.
    persisted_arguments : String
        Identifier of the specific NTD to load/save.
    tensor_spectrogram : tensorly tensor
        The tensor to decompose.
    ranks : list of integers
        Ranks of the decomposition.
    init : String, optional
        The type of initialization of the NTD.
        See the NTD module to have more information regarding initialization.
        The default is "chromas",
        meaning that the first factor will be set to the 12-size identity matrix,
        and the other factors will be initialized by HOSVD.
    update_rule : String, optional
        Optimization scheme: "hals" (Hierarchical ALS, the default) or
        "mu" (Multiplicative Updates).
    beta : float, optional
        Beta parameter of the beta-divergence; required (non-None) when
        update_rule is "mu", ignored otherwise. The default is None.

    Raises
    ------
    NotImplementedError
        Errors in the arguments.

    Returns
    -------
    core : tensorly tensor
        The core of the decomposition.
    factors : numpy array
        The factors of the decomposition.

    """
    # Validate arguments before touching the filesystem.
    if update_rule == "mu" and beta is None:  # fix: identity check, not ==
        raise NotImplementedError(
            "Inconsistent arguments. Beta should be set if the update_rule is the MU."
        )

    # HALS and MU results are cached in distinct directories.
    if update_rule == "hals":
        path_for_ntd = "{}/ntd/{}_{}_{}".format(persisted_path, ranks[0],
                                                ranks[1], ranks[2])
    elif update_rule == "mu":
        path_for_ntd = "{}/ntd_mu/{}_{}_{}".format(persisted_path, ranks[0],
                                                   ranks[1], ranks[2])
    else:
        raise NotImplementedError(
            f"Update rule type not understood: {update_rule}")

    try:
        # Fast path: load a previously persisted decomposition.
        a_core_path = "{}/core{}.npy".format(path_for_ntd, persisted_arguments)
        a_core = np.load(a_core_path)
        a_factor_path = "{}/factors{}.npy".format(path_for_ntd,
                                                  persisted_arguments)
        # allow_pickle: factors are stored as an object array of matrices.
        a_factor = np.load(a_factor_path, allow_pickle=True)
        return a_core, a_factor
    except FileNotFoundError:
        # Slow path: compute with the requested update rule, then persist.
        if update_rule == "hals":
            core, factors = NTD.ntd(
                tensor_spectrogram,
                ranks=ranks,
                init=init,
                verbose=False,
                sparsity_coefficients=[None, None, None, None],
                normalize=[True, True, False, True],
                mode_core_norm=2,
                deterministic=True)
        elif update_rule == "mu":
            core, factors = NTD.ntd_mu(
                tensor_spectrogram,
                ranks=ranks,
                init=init,
                verbose=False,
                beta=beta,
                n_iter_max=1000,
                sparsity_coefficients=[None, None, None, None],
                normalize=[True, True, False, True],
                mode_core_norm=2,
                deterministic=True)

        pathlib.Path(path_for_ntd).mkdir(parents=True, exist_ok=True)

        # np.save appends the ".npy" suffix itself.
        core_path = "{}/core{}".format(path_for_ntd, persisted_arguments)
        np.save(core_path, core)
        factors_path = "{}/factors{}".format(path_for_ntd, persisted_arguments)
        np.save(factors_path, factors)
        return core, factors