Example #1
    def test_abs(self):
        float32_tensor = ht.arange(-10, 10, dtype=ht.float32, split=0)
        absolute_values = ht.abs(float32_tensor)

        # basic absolute test
        self.assertIsInstance(absolute_values, ht.tensor)
        self.assertEqual(absolute_values.dtype, ht.float32)
        self.assertEqual(absolute_values.sum(axis=0), 100)

        # check whether output works
        output_tensor = ht.zeros(20, split=0)
        self.assertEqual(output_tensor.sum(axis=0), 0)
        ht.absolute(float32_tensor, out=output_tensor)
        self.assertEqual(output_tensor.sum(axis=0), 100)

        # dtype parameter
        int64_tensor = ht.arange(-10, 10, dtype=ht.int64)
        absolute_values = ht.abs(int64_tensor, dtype=ht.float32)
        self.assertIsInstance(absolute_values, ht.tensor)
        self.assertEqual(absolute_values.sum(axis=0), 100)
        self.assertEqual(absolute_values.dtype, ht.float32)
        self.assertEqual(absolute_values._tensor__array.dtype, torch.float32)

        # exceptions
        with self.assertRaises(TypeError):
            ht.absolute('hello')
        with self.assertRaises(TypeError):
            float32_tensor.abs(out=1)
        with self.assertRaises(TypeError):
            float32_tensor.absolute(out=float32_tensor, dtype=3.2)
Example #2
    def test_fit_iris_unsplit(self):
        split = 0
        # get some test data
        iris = ht.load("heat/datasets/iris.csv", sep=";", split=split)
        ht.random.seed(1)
        # fit the clusters
        k = 3
        kmedoid = ht.cluster.KMedoids(n_clusters=k, random_state=1)
        kmedoid.fit(iris)

        # check whether the results are correct
        self.assertIsInstance(kmedoid.cluster_centers_, ht.DNDarray)
        self.assertEqual(kmedoid.cluster_centers_.shape, (k, iris.shape[1]))
        # same test with init=kmedoids++
        kmedoid = ht.cluster.KMedoids(n_clusters=k, init="kmedoids++")
        kmedoid.fit(iris)

        # check whether the results are correct
        self.assertIsInstance(kmedoid.cluster_centers_, ht.DNDarray)
        self.assertEqual(kmedoid.cluster_centers_.shape, (k, iris.shape[1]))

        # check whether result is actually a datapoint
        for i in range(kmedoid.cluster_centers_.shape[0]):
            self.assertTrue(
                ht.any(
                    ht.sum(ht.abs(kmedoid.cluster_centers_[i, :] - iris),
                           axis=1) == 0))
Example #3
    def test_spherical_clusters(self):
        seed = 1
        n = 20 * ht.MPI_WORLD.size
        data = self.create_spherical_dataset(num_samples_cluster=n,
                                             radius=1.0,
                                             offset=4.0,
                                             dtype=ht.float32,
                                             random_state=seed)
        kmedoid = ht.cluster.KMedoids(n_clusters=4, init="kmedoids++")
        kmedoid.fit(data)
        self.assertIsInstance(kmedoid.cluster_centers_, ht.DNDarray)
        self.assertEqual(kmedoid.cluster_centers_.shape, (4, 3))
        for i in range(kmedoid.cluster_centers_.shape[0]):
            self.assertTrue(
                ht.any(
                    ht.sum(ht.abs(kmedoid.cluster_centers_[i, :] - data),
                           axis=1) == 0))

        # More Samples
        n = 100 * ht.MPI_WORLD.size
        data = self.create_spherical_dataset(num_samples_cluster=n,
                                             radius=1.0,
                                             offset=4.0,
                                             dtype=ht.float32,
                                             random_state=seed)
        kmedoid = ht.cluster.KMedoids(n_clusters=4, init="kmedoids++")
        kmedoid.fit(data)
        self.assertIsInstance(kmedoid.cluster_centers_, ht.DNDarray)
        self.assertEqual(kmedoid.cluster_centers_.shape, (4, 3))
        # check whether result is actually a datapoint
        for i in range(kmedoid.cluster_centers_.shape[0]):
            self.assertTrue(
                ht.any(
                    ht.sum(ht.abs(kmedoid.cluster_centers_[i, :] - data),
                           axis=1) == 0))

        # different datatype
        n = 20 * ht.MPI_WORLD.size
        data = self.create_spherical_dataset(num_samples_cluster=n,
                                             radius=1.0,
                                             offset=4.0,
                                             dtype=ht.float64,
                                             random_state=seed)
        kmedoid = ht.cluster.KMedoids(n_clusters=4, init="kmedoids++")
        kmedoid.fit(data)
        self.assertIsInstance(kmedoid.cluster_centers_, ht.DNDarray)
        self.assertEqual(kmedoid.cluster_centers_.shape, (4, 3))
        for i in range(kmedoid.cluster_centers_.shape[0]):
            self.assertTrue(
                ht.any(
                    ht.sum(ht.abs(kmedoid.cluster_centers_[i, :] -
                                  data.astype(ht.float32)),
                           axis=1) == 0))

        # on ints (different radius, offset, and datatype)
        data = self.create_spherical_dataset(num_samples_cluster=n,
                                             radius=10.0,
                                             offset=40.0,
                                             dtype=ht.int32,
                                             random_state=seed)
        kmedoid = ht.cluster.KMedoids(n_clusters=4, init="kmedoids++")
        kmedoid.fit(data)
        self.assertIsInstance(kmedoid.cluster_centers_, ht.DNDarray)
        self.assertEqual(kmedoid.cluster_centers_.shape, (4, 3))
        for i in range(kmedoid.cluster_centers_.shape[0]):
            self.assertTrue(
                ht.any(
                    ht.sum(ht.abs(kmedoid.cluster_centers_[i, :] - data),
                           axis=1) == 0))
Example #4
def lanczos(
    A: DNDarray,
    m: int,
    v0: Optional[DNDarray] = None,
    V_out: Optional[DNDarray] = None,
    T_out: Optional[DNDarray] = None,
) -> Tuple[DNDarray, DNDarray]:
    r"""
    The Lanczos algorithm is an iterative approximation of the solution to the eigenvalue problem, as an adaptation of
    power methods to find the m "most useful" (tending towards extreme highest/lowest) eigenvalues and eigenvectors of
    an :math:`n \times n` Hermitian matrix, where often :math:`m<<n`.
    It returns two matrices :math:`V` and :math:`T`, where:

        - :math:`V` is a Matrix of size :math:`n\times m`, with orthonormal columns, that span the Krylow subspace \n
        - :math:`T` is a Tridiagonal matrix of size :math:`m\times m`, with coefficients :math:`\alpha_1,..., \alpha_n`
          on the diagonal and coefficients :math:`\beta_1,...,\beta_{n-1}` on the side-diagonals\n

    Parameters
    ----------
    A : DNDarray
        2D symmetric, positive-definite matrix
    m : int
        Number of Lanczos iterations
    v0 : DNDarray, optional
        1D starting vector of Euclidean norm 1. If not provided, a random vector will be used to start the algorithm
    V_out : DNDarray, optional
        Output matrix for the Krylov vectors, shape = (n, m)
    T_out : DNDarray, optional
        Output matrix for the tridiagonal matrix, shape = (m, m)
    """
    if not isinstance(A, DNDarray):
        raise TypeError("A needs to be of type ht.dndarra, but was {}".format(
            type(A)))

    if A.ndim != 2:
        raise RuntimeError("A needs to be a 2D matrix")
    if not isinstance(m, (int, float)):
        raise TypeError("m must be either int or float, but was {}".format(
            type(m)))

    n, column = A.shape
    if n != column:
        raise TypeError("Input Matrix A needs to be symmetric.")
    T = ht.zeros((m, m))
    if A.split == 0:
        # This is done for better memory access in the reorthogonalization Gram-Schmidt algorithm
        V = ht.ones((n, m), split=0, dtype=A.dtype, device=A.device)
    else:
        V = ht.ones((n, m), split=None, dtype=A.dtype, device=A.device)

    if v0 is None:
        vr = ht.random.rand(n, split=V.split)
        v0 = vr / ht.norm(vr)
    else:
        if v0.split != V.split:
            v0.resplit_(axis=V.split)
    # 0th iteration
    # vector v0 has Euclidean norm = 1
    w = ht.matmul(A, v0)
    alpha = ht.dot(w, v0)
    w = w - alpha * v0
    T[0, 0] = alpha
    V[:, 0] = v0
    for i in range(1, int(m)):
        beta = ht.norm(w)
        if ht.abs(beta) < 1e-10:
            # print("Lanczos breakdown in iteration {}".format(i))
            # Lanczos Breakdown, pick a random vector to continue
            vr = ht.random.rand(n, dtype=A.dtype, split=V.split)
            # orthogonalize v_r with respect to all previous vectors v[0], ..., v[i-1]
            for j in range(i):
                vi_loc = V.larray[:, j]
                a = torch.dot(vr.larray, vi_loc)
                b = torch.dot(vi_loc, vi_loc)
                A.comm.Allreduce(ht.communication.MPI.IN_PLACE, a,
                                 ht.communication.MPI.SUM)
                A.comm.Allreduce(ht.communication.MPI.IN_PLACE, b,
                                 ht.communication.MPI.SUM)
                vr.larray = vr.larray - a / b * vi_loc
            # normalize v_r to Euclidean norm 1 and set it as the ith vector v
            vi = vr / ht.norm(vr)
        else:
            vr = w

            # Reorthogonalization
            # TODO: rethink this; mask torch calls, see issue #494
            # This is the fast solution; item access at the ht.DNDarray level is much slower
            for j in range(i):
                vi_loc = V.larray[:, j]
                a = torch.dot(vr.larray, vi_loc)
                b = torch.dot(vi_loc, vi_loc)
                A.comm.Allreduce(ht.communication.MPI.IN_PLACE, a,
                                 ht.communication.MPI.SUM)
                A.comm.Allreduce(ht.communication.MPI.IN_PLACE, b,
                                 ht.communication.MPI.SUM)
                vr.larray = vr.larray - a / b * vi_loc

            vi = vr / ht.norm(vr)

        w = ht.matmul(A, vi)
        alpha = ht.dot(w, vi)

        w = w - alpha * vi - beta * V[:, i - 1]

        T[i - 1, i] = beta
        T[i, i - 1] = beta
        T[i, i] = alpha
        V[:, i] = vi

    if V.split is not None:
        V.resplit_(axis=None)

    if T_out is not None:
        T_out = T.copy()
        if V_out is not None:
            V_out = V.copy()
            return V_out, T_out
        return V, T_out
    elif V_out is not None:
        V_out = V.copy()
        return V_out, T

    return V, T
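
A minimal usage sketch for the `lanczos` routine above. The entry point `ht.lanczos` and the tolerance values are assumptions, not confirmed by the source; adjust the import path to wherever the function lives in your tree. The sketch builds a small symmetric positive-definite matrix, runs m Lanczos iterations, and checks the two properties promised by the docstring: V has orthonormal columns, and T is the projection of A onto the Krylov subspace.

import heat as ht

n, m = 50, 10

# random symmetric positive-definite test matrix: B @ B.T + n * I
B = ht.random.rand(n, n, split=0)
A = ht.matmul(B, ht.transpose(B)) + n * ht.eye(n, split=0)

# `ht.lanczos` as the import location is an assumption
V, T = ht.lanczos(A, m)

# columns of V should be orthonormal: V^T V = I
print(ht.allclose(ht.matmul(ht.transpose(V), V), ht.eye(m), atol=1e-4))
# T should reproduce A on the Krylov subspace: T = V^T A V
print(ht.allclose(ht.matmul(ht.transpose(V), ht.matmul(A, V)), T, atol=1e-4))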
Example #5
    def test_abs(self):
        # for abs==absolute
        float32_tensor = ht.arange(-10, 10, dtype=ht.float32, split=0)
        absolute_values = ht.abs(float32_tensor)
        # for fabs
        int8_tensor_fabs = ht.arange(-10.5, 10.5, dtype=ht.int8, split=0)
        int8_absolute_values_fabs = ht.fabs(int8_tensor_fabs)
        int16_tensor_fabs = ht.arange(-10.5, 10.5, dtype=ht.int16, split=0)
        int16_absolute_values_fabs = ht.fabs(int16_tensor_fabs)
        int32_tensor_fabs = ht.arange(-10.5, 10.5, dtype=ht.int32, split=0)
        int32_absolute_values_fabs = ht.fabs(int32_tensor_fabs)
        int64_tensor_fabs = ht.arange(-10.5, 10.5, dtype=ht.int64, split=0)
        int64_absolute_values_fabs = ht.fabs(int64_tensor_fabs)
        float32_tensor_fabs = ht.arange(-10.5, 10.5, dtype=ht.float32, split=0)
        float32_absolute_values_fabs = ht.fabs(float32_tensor_fabs)
        float64_tensor_fabs = ht.arange(-10.5, 10.5, dtype=ht.float64, split=0)
        float64_absolute_values_fabs = ht.fabs(float64_tensor_fabs)

        # basic absolute test
        self.assertIsInstance(absolute_values, ht.DNDarray)
        self.assertEqual(absolute_values.dtype, ht.float32)
        self.assertEqual(absolute_values.sum(axis=0), 100)
        # for fabs
        self.assertEqual(int8_absolute_values_fabs.sum(axis=0), 100.0)
        self.assertEqual(int16_absolute_values_fabs.sum(axis=0), 100.0)
        self.assertEqual(int32_absolute_values_fabs.sum(axis=0), 100.0)
        self.assertEqual(int64_absolute_values_fabs.sum(axis=0), 100.0)
        self.assertEqual(float32_absolute_values_fabs.sum(axis=0), 110.5)
        self.assertEqual(float64_absolute_values_fabs.sum(axis=0), 110.5)

        # check whether output works
        # for abs==absolute
        output_tensor = ht.zeros(20, split=0)
        self.assertEqual(output_tensor.sum(axis=0, keepdim=True), 0)
        ht.absolute(float32_tensor, out=output_tensor)

        self.assertEqual(output_tensor.sum(axis=0), 100)
        # for fabs
        output_tensor_fabs = ht.zeros(21, split=0)
        self.assertEqual(output_tensor_fabs.sum(axis=0), 0)
        ht.fabs(float32_tensor_fabs, out=output_tensor_fabs)
        self.assertEqual(output_tensor_fabs.sum(axis=0), 110.5)

        # dtype parameter
        # for abs==absolute
        int64_tensor = ht.arange(-10, 10, dtype=ht.int64)
        absolute_values = ht.abs(int64_tensor, dtype=ht.float32)
        self.assertIsInstance(absolute_values, ht.DNDarray)
        self.assertEqual(absolute_values.sum(axis=0), 100)
        self.assertEqual(absolute_values.dtype, ht.float32)
        self.assertEqual(absolute_values._DNDarray__array.dtype, torch.float32)
        # for fabs
        self.assertEqual(int8_absolute_values_fabs.dtype, ht.float32)
        self.assertEqual(int16_absolute_values_fabs.dtype, ht.float32)
        self.assertEqual(int32_absolute_values_fabs.dtype, ht.float32)
        self.assertEqual(int64_absolute_values_fabs.dtype, ht.float64)
        self.assertEqual(float32_absolute_values_fabs.dtype, ht.float32)
        self.assertEqual(float64_absolute_values_fabs.dtype, ht.float64)

        # exceptions
        # for abs==absolute
        with self.assertRaises(TypeError):
            ht.absolute("hello")
        with self.assertRaises(TypeError):
            float32_tensor.abs(out=1)
        with self.assertRaises(TypeError):
            float32_tensor.absolute(out=float32_tensor, dtype=3.2)
        # for fabs
        with self.assertRaises(TypeError):
            ht.fabs("hello")
        with self.assertRaises(TypeError):
            float32_tensor_fabs.fabs(out=1)

        # test with unsplit tensor
        # for fabs
        float32_unsplit_tensor_fabs = ht.arange(-10.5, 10.5, dtype=ht.float32)
        float32_unsplit_absolute_values_fabs = ht.fabs(
            float32_unsplit_tensor_fabs)
        self.assertEqual(float32_unsplit_absolute_values_fabs.sum(), 110.5)
        self.assertEqual(float32_unsplit_absolute_values_fabs.dtype,
                         ht.float32)