Example #1
def random_multi_indices(shape):
    '''
    Return a random variable uniformly distributed on I1 x ... x Id.

    If shape contains integers, the intervals are defined as
    Ij = np.arange(shape[j-1]), j = 1, ..., len(shape).

    Parameters
    ----------
    shape : list or numpy.ndarray
        The number of elements of each interval, or the intervals themselves.

    Returns
    -------
    tensap.RandomVector
        The random variable uniformly distributed on I1 x ... x Id.

    '''
    order = len(shape)

    if np.all([isinstance(x, (list, np.ndarray)) for x in shape]):
        ind = shape
    else:
        ind = []
        for dim in range(order):
            ind.append(np.arange(shape[dim]))

    for dim in range(order):
        ind[dim] = tensap.DiscreteRandomVariable(ind[dim])

    return tensap.RandomVector(ind)
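# Usage sketch (added for illustration, not part of the original example):
# assumes numpy and tensap are importable and that tensap.RandomVector
# exposes a random(n) sampling method.
import numpy as np
import tensap

X = random_multi_indices([3, 4])  # uniform on {0, 1, 2} x {0, 1, 2, 3}
sample = X.random(5)              # 5 random multi-indices, one per row
print(np.shape(sample))           # should be (5, 2)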
Example #2
    def tensorized_function_functional_bases(self, h=1):
        '''
        Return a tensap.FunctionalBases object associated with the provided
        basis or basis function(s) and the Tensorizer object.

        Parameters
        ----------
        h : tensap.FunctionalBases or tensap.FunctionalBasis or function or
        list or scalar, optional
            The bases, basis, function(s) or polynomial degree used to
            generate the bases. The default is 1, yielding polynomials of
            degree at most 1 in each dimension.

        Returns
        -------
        tensap.FunctionalBases
            The functional bases.

        '''
        if hasattr(h, '__call__'):
            h = tensap.UserDefinedFunctionalBasis([h])
            h.measure = self.Y.random_variables[0]

        if isinstance(h, tensap.FunctionalBasis):
            h = tensap.FunctionalBases.duplicate(h, self.dim)
            
        if np.isscalar(h):
            h = [tensap.PolynomialFunctionalBasis(
                y.orthonormal_polynomials(), range(h + 1))
                for y in self.Y.random_variables]
            h = tensap.FunctionalBases(h)

        assert isinstance(h, tensap.FunctionalBases), \
            'Wrong type of argument for h.'

        p = tensap.DiscretePolynomials(tensap.DiscreteRandomVariable(
            np.reshape(np.arange(self.b), [-1, 1])))
        p = tensap.PolynomialFunctionalBasis(p, np.arange(self.b))

        bases = [p]*self.d*self.dim + list(h.bases)
        return tensap.FunctionalBases(bases)
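# Usage sketch (added for illustration, not part of the original example):
# assumes the Tensorizer constructor accepts (b, d, dim) and builds a default
# measure for self.Y; here b = 2 (base), d = 8 (resolution), dim = 1, and
# h = 4 requests polynomials of degree at most 4.
import tensap

TENSORIZER = tensap.Tensorizer(2, 8, 1)
BASES = TENSORIZER.tensorized_function_functional_bases(4)
print(BASES)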
Example #3
    def random(self, n=1):
        '''
        Generate n random numbers according to the probability distribution
        obtained by rescaling the DiscreteMeasure.

        Parameters
        ----------
        n : int, optional
            The number of random numbers generated. The default is 1.

        Returns
        -------
        numpy.ndarray
            The n generated random numbers.

        '''
        Y = tensap.DiscreteRandomVariable(
            np.reshape(np.arange(self.weights.size), [-1, 1]),
            self.weights / np.sum(self.weights))
        ind = Y.icdf(np.random.rand(n)).astype(int)
        return self.values[ind, :]
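# Usage sketch (added for illustration, not part of the original example):
# assumes tensap.DiscreteMeasure can be constructed from an array of support
# points (one per row) and a vector of nonnegative weights.
import numpy as np
import tensap

VALUES = np.reshape(np.arange(5), [-1, 1])   # support points x0, ..., x4
WEIGHTS = np.array([1., 2., 3., 2., 1.])     # unnormalized weights
MU = tensap.DiscreteMeasure(VALUES, WEIGHTS)
print(MU.random(10))                          # 10 points drawn after rescaling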
Example #4
    def discretize(self, n):
        '''
        Return a discrete random variable Xn taking n possible values
        x1, ..., xn, these values being the quantiles of self of probability
        1/(2n) + i/n, i = 0, ..., n-1, and such that P(Xn = xi) = 1/n.

        Parameters
        ----------
        n : int
            The number of possible values the discrete random variable can
            take.

        Returns
        -------
        tensap.DiscreteRandomVariable
            The obtained discrete random variable.

        '''
        u = np.linspace(1 / (2 * n), 1 - 1 / (2 * n), n)
        x = self.icdf(u)
        return tensap.DiscreteRandomVariable(x)
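# Usage sketch (added for illustration, not part of the original example):
# assumes tensap.NormalRandomVariable() is the standard normal random
# variable; it is discretized into 4 values, each carrying probability 1/4.
import tensap

X = tensap.NormalRandomVariable()
X_DISCRETE = X.discretize(4)
print(X_DISCRETE)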
# %% Patch reshape of the data: the patches are consecutive entries of the data
PS = [4, 4]  # Patch size
DATA = np.array([
    np.concatenate([
        np.ravel(
            np.reshape(DATA[k, :], [8] * 2)[PS[0] * i:PS[0] * i + PS[0],
                                            PS[1] * j:PS[1] * j + PS[1]])
        for i in range(int(8 / PS[0])) for j in range(int(8 / PS[1]))
    ]) for k in range(DATA.shape[0])
])
DIM = int(DATA.shape[1] / np.prod(PS))
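
# Illustration (added for clarity, not part of the original script): for a
# single 8x8 image, the reordering above flattens each 4x4 patch so that its
# 16 pixels become consecutive entries of the reordered row.
IMAGE = np.arange(64).reshape(8, 8)           # toy 8x8 "image"
ROW = np.concatenate([
    np.ravel(IMAGE[PS[0] * i:PS[0] * (i + 1), PS[1] * j:PS[1] * (j + 1)])
    for i in range(8 // PS[0]) for j in range(8 // PS[1])
])
print(ROW[:16])                               # pixels of the first 4x4 patch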

# %% Probability measure
print('Dimension %i' % DIM)
X = tensap.RandomVector(tensap.DiscreteRandomVariable(np.unique(DATA)), DIM)

# %% Training and test samples
P_TRAIN = 0.9  # Proportion of the sample used for training

N = DATA.shape[0]
TRAIN = random.sample(range(N), int(np.round(P_TRAIN * N)))
TEST = np.setdiff1d(range(N), TRAIN)
X_TRAIN = DATA[TRAIN, :]
X_TEST = DATA[TEST, :]
Y_TRAIN = DIGITS.target[TRAIN]
Y_TEST = DIGITS.target[TEST]

# One hot encoding (vector-valued function)
Y_TRAIN = tf.one_hot(Y_TRAIN.astype(int), 10, dtype=tf.float64)
Y_TEST = tf.one_hot(Y_TEST.astype(int), 10, dtype=tf.float64)