Example No. 1
    def predict_proba(self, x: DNDarray) -> DNDarray:
        """
        Adapted to HeAT from scikit-learn.
        Return probability estimates for the test tensor ``x``, i.e. the
        probability of each sample for each class in the model. The columns
        correspond to the classes in sorted order, as they appear in the
        attribute ``classes_``.

        Parameters
        ----------
        x : DNDarray
            Input data. Shape = (n_samples, n_features).
        """
        return ht.exp(self.predict_log_proba(x))
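
A minimal usage sketch (the toy data are illustrative; the estimator is assumed to expose fit and predict_log_proba in the scikit-learn style, as heat's GaussianNB does):

import heat as ht

# Fit a small Gaussian naive Bayes model on toy data (illustrative values).
X = ht.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
y = ht.array([0, 1, 1])
clf = ht.naive_bayes.GaussianNB()
clf.fit(X, y)

# predict_proba simply exponentiates the normalized log-probabilities,
# so each row of the result should sum to (approximately) one.
proba = ht.exp(clf.predict_log_proba(X))
print(ht.sum(proba, axis=1))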
Example No. 2
    def test_exp(self):
        elements = 10
        tmp = torch.arange(elements,
                           dtype=torch.float64,
                           device=self.device.torch_device).exp()
        comparison = ht.array(tmp)

        # exponential of float32
        float32_tensor = ht.arange(elements, dtype=ht.float32)
        float32_exp = ht.exp(float32_tensor)
        self.assertIsInstance(float32_exp, ht.DNDarray)
        self.assertEqual(float32_exp.dtype, ht.float32)
        self.assertTrue(ht.allclose(float32_exp,
                                    comparison.astype(ht.float32)))

        # exponential of float64
        float64_tensor = ht.arange(elements, dtype=ht.float64)
        float64_exp = ht.exp(float64_tensor)
        self.assertIsInstance(float64_exp, ht.DNDarray)
        self.assertEqual(float64_exp.dtype, ht.float64)
        self.assertTrue(ht.allclose(float64_exp, comparison))

        # exponential of ints, automatic conversion to intermediate floats
        int32_tensor = ht.arange(elements, dtype=ht.int32)
        int32_exp = ht.exp(int32_tensor)
        self.assertIsInstance(int32_exp, ht.DNDarray)
        self.assertEqual(int32_exp.dtype, ht.float32)
        self.assertTrue(ht.allclose(int32_exp, ht.float32(comparison)))

        # exponential of longs, automatic conversion to intermediate floats
        int64_tensor = ht.arange(elements, dtype=ht.int64)
        int64_exp = int64_tensor.exp()
        self.assertIsInstance(int64_exp, ht.DNDarray)
        self.assertEqual(int64_exp.dtype, ht.float64)
        self.assertTrue(ht.allclose(int64_exp, comparison))

        # check exceptions
        with self.assertRaises(TypeError):
            ht.exp([1, 2, 3])
        with self.assertRaises(TypeError):
            ht.exp("hello world")

        # Tests with split
        expected = torch.arange(10,
                                dtype=torch.float32,
                                device=self.device.torch_device).exp()
        actual = ht.arange(10, split=0, dtype=ht.float32).exp()
        self.assertEqual(actual.gshape, tuple(expected.shape))
        self.assertEqual(actual.split, 0)
        actual = actual.resplit_(None)
        self.assertEqual(actual.lshape, expected.shape)
        self.assertTrue(torch.equal(expected, actual.larray))
        self.assertEqual(actual.dtype, ht.float32)
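
The dtype assertions above encode ht.exp's promotion rule for integer inputs; a quick illustrative check (assuming default devices):

import heat as ht

# Integer inputs are promoted to floating point: int32 maps to float32,
# int64 maps to float64, and float inputs keep their precision.
print(ht.exp(ht.arange(3, dtype=ht.int32)).dtype)    # expected: ht.float32
print(ht.exp(ht.arange(3, dtype=ht.int64)).dtype)    # expected: ht.float64
print(ht.exp(ht.arange(3, dtype=ht.float64)).dtype)  # expected: ht.float64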
Example No. 3
    def test_exp(self):
        elements = 10
        comparison = torch.arange(elements, dtype=torch.float64).exp()

        # exponential of float32
        float32_tensor = ht.arange(elements, dtype=ht.float32)
        float32_exp = ht.exp(float32_tensor)
        self.assertIsInstance(float32_exp, ht.tensor)
        self.assertEqual(float32_exp.dtype, ht.float32)
        in_range = (float32_exp._tensor__array - comparison.type(torch.float32)).abs() < FLOAT_EPSILON
        self.assertTrue(in_range.all())

        # exponential of float64
        float64_tensor = ht.arange(elements, dtype=ht.float64)
        float64_exp = ht.exp(float64_tensor)
        self.assertIsInstance(float64_exp, ht.tensor)
        self.assertEqual(float64_exp.dtype, ht.float64)
        in_range = (float64_exp._tensor__array - comparison).abs() < FLOAT_EPSILON
        self.assertTrue(in_range.all())

        # exponential of ints, automatic conversion to intermediate floats
        int32_tensor = ht.arange(elements, dtype=ht.int32)
        int32_exp = ht.exp(int32_tensor)
        self.assertIsInstance(int32_exp, ht.tensor)
        self.assertEqual(int32_exp.dtype, ht.float64)
        in_range = (int32_exp._tensor__array - comparison).abs() < FLOAT_EPSILON
        self.assertTrue(in_range.all())

        # exponential of longs, automatic conversion to intermediate floats
        int64_tensor = ht.arange(elements, dtype=ht.int64)
        int64_exp = ht.exp(int64_tensor)
        self.assertIsInstance(int64_exp, ht.tensor)
        self.assertEqual(int64_exp.dtype, ht.float64)
        in_range = (int64_exp._tensor__array - comparison).abs() < FLOAT_EPSILON
        self.assertTrue(in_range.all())

        # check exceptions
        with self.assertRaises(TypeError):
            ht.exp([1, 2, 3])
        with self.assertRaises(TypeError):
            ht.exp('hello world')
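
This older variant checks closeness manually against FLOAT_EPSILON and still uses the pre-DNDarray class name ht.tensor; with the current API the same check can be written more compactly (a sketch, assuming ht.allclose's default tolerances are acceptable):

import torch
import heat as ht

# Equivalent closeness check using the current API.
comparison = ht.array(torch.arange(10, dtype=torch.float64).exp())
result = ht.exp(ht.arange(10, dtype=ht.float64))
assert ht.allclose(result, comparison)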
Example No. 4
    def predict_proba(self, X):
        """
        Adapted to HeAT from scikit-learn.

        Return probability estimates for the test tensor X.

        Parameters
        ----------
        X : ht.tensor of shape (n_samples, n_features)

        Returns
        -------
        C : ht.tensor of shape (n_samples, n_classes)
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        return ht.exp(self.predict_log_proba(X))
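
Because predict_log_proba returns normalized log-probabilities, exponentiating them yields rows that sum to one; a toy check of that relation (the values are illustrative, not model output):

import heat as ht

# Already-normalized probabilities, passed through a log/exp round trip.
log_proba = ht.log(ht.array([[0.25, 0.75], [0.9, 0.1]]))
proba = ht.exp(log_proba)
print(ht.sum(proba, axis=1))  # approximately [1.0, 1.0]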
Example No. 5
    def logsumexp(self,
                  a,
                  axis=None,
                  b=None,
                  keepdim=False,
                  return_sign=False):
        """
        Adapted to HeAT from scikit-learn.

        Compute the log of the sum of exponentials of input elements.

        Parameters
        ----------
        a : ht.tensor
            Input array.
        axis : None or int or tuple of ints, optional
            Axis or axes over which the sum is taken. By default `axis` is None,
            and all elements are summed.
        b : ht.tensor, optional
            Scaling factor for exp(`a`); must be of the same shape as `a` or
            broadcastable to `a`. These values may be negative in order to
            implement subtraction.
        keepdim : bool, optional
            If this is set to True, the axes which are reduced are left in the
            result as dimensions with size one. With this option, the result
            will broadcast correctly against the original array.
        return_sign : bool, optional
            If this is set to True, the result will be a pair containing sign
            information; if False, results that are negative will be returned
            as NaN. Default is False (no sign information).
            TODO: currently raises NotImplementedError.

        Returns
        -------
        res : ht.tensor
            The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
            more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
            is returned.
        sgn : ht.tensor
            If `return_sign` is True, this will be an array of floating-point
            numbers matching `res` and +1, 0, or -1 depending on the sign
            of the result. If False, only one result is returned.
            TODO: not implemented yet.

        """

        if b is not None:
            raise NotImplementedError("Not implemented for weighted logsumexp")

        a_max = ht.max(a, axis=axis, keepdim=True)

        # TODO: sanitize a_max / implement isfinite(): sanitation module, cf. #468
        # if a_max.numdims > 0:
        #     a_max[~np.isfinite(a_max)] = 0
        # elif not np.isfinite(a_max):
        #     a_max = 0

        # TODO: reinstate after allowing b not None
        # if b is not None:
        #     b = np.asarray(b)
        #     tmp = b * np.exp(a - a_max)
        # else:
        tmp = ht.exp(a - a_max)

        s = ht.sum(tmp, axis=axis, keepdim=keepdim)
        if return_sign:
            raise NotImplementedError("Not implemented for return_sign")
            # sgn = np.sign(s)  # TODO: np.sign
            # s *= sgn  # /= makes more sense but we need zero -> zero
        out = ht.log(s)

        if not keepdim:
            a_max = ht.squeeze(a_max, axis=axis)
        out += a_max

        # if return_sign: #TODO: np.sign
        #    return out, sgn
        # else:
        return out
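
The body implements the standard max-shift trick: subtracting the per-axis maximum before exponentiating keeps every intermediate value in [0, 1], so the sum cannot overflow. A standalone sketch of the same idea (the function name is illustrative):

import heat as ht

def stable_logsumexp(a, axis=None, keepdim=False):
    # Shift by the maximum so every exponent is <= 0, then add it back.
    a_max = ht.max(a, axis=axis, keepdim=True)
    s = ht.sum(ht.exp(a - a_max), axis=axis, keepdim=keepdim)
    out = ht.log(s)
    if not keepdim:
        a_max = ht.squeeze(a_max, axis=axis)
    return out + a_max

a = ht.array([1000.0, 1000.0, 1000.0])
print(ht.log(ht.sum(ht.exp(a))))    # inf: the naive form overflows
print(stable_logsumexp(a, axis=0))  # ~1001.0986 == 1000 + log(3)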